op_helper.c 103 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
72278227922802281228222832284228522862287228822892290229122922293229422952296229722982299230023012302230323042305230623072308230923102311231223132314231523162317231823192320232123222323232423252326232723282329233023312332233323342335233623372338233923402341234223432344234523462347234823492350235123522353235423552356235723582359236023612362236323642365236623672368236923702371237223732374237523762377237823792380238123822383238423852386238723882389239023912392239323942395239623972398239924002401240224032404240524062407240824092410241124122413241424152416241724182419242024212422242324242425242624272428242924302431243224332434243524362437243824392440244124422443244424452446244724482449245024512452245324542455245624572458245924602461246224632464246524662467246824692470247124722473247424752476247724782479248024812482248324842485248624872488248924902491249224932494249524962497249824992500250125022503250425052506250725082509251025112512251325142515251625172518251925202521252225232524252525262527252825292530253125322533253425352536253725382539254025412542254325442545254625472548254925502551255225532554255525562557255825592560256125622563256425652566256725682569257025712572257325742575257625772578257925802581258225832584258525862587258825892590259125922593259425952596259725982599260026012602260326042605260626072608260926102611261226132614261526162617261826192620262126222623262426252626262726282629263026312632263326342635263626372638263926402641264226432644264526462647264826492650265126522653265426552656265726582659266026612662266326642665266626672668266926702671267226732674267526762677267826792680268126822683268426852686268726882689269026912692269326942695269626972698269927002701270227032704270527062707270827092710271127122713271427152716271727182719272027212722272327242725272627272728272927302731273227332734273527362737273827392740274127422743274427452746274727482749275027512752275327542755275627572758275927602761276227632764276527662767276827692770277127722773277427752776277
7277827792780278127822783278427852786278727882789279027912792279327942795279627972798279928002801280228032804280528062807280828092810281128122813281428152816281728182819282028212822282328242825282628272828282928302831283228332834283528362837283828392840284128422843284428452846284728482849285028512852285328542855285628572858285928602861286228632864286528662867286828692870287128722873287428752876287728782879288028812882288328842885288628872888288928902891289228932894289528962897289828992900290129022903290429052906290729082909291029112912291329142915291629172918291929202921292229232924292529262927292829292930293129322933293429352936293729382939294029412942294329442945294629472948294929502951295229532954295529562957295829592960296129622963296429652966296729682969297029712972297329742975297629772978297929802981298229832984298529862987298829892990299129922993299429952996299729982999300030013002300330043005300630073008300930103011301230133014301530163017301830193020302130223023302430253026302730283029303030313032303330343035303630373038303930403041304230433044304530463047304830493050305130523053305430553056305730583059306030613062306330643065306630673068306930703071307230733074307530763077307830793080308130823083308430853086308730883089309030913092309330943095309630973098309931003101310231033104310531063107310831093110311131123113311431153116311731183119312031213122312331243125312631273128312931303131313231333134313531363137313831393140314131423143314431453146314731483149315031513152315331543155315631573158315931603161316231633164316531663167316831693170317131723173317431753176317731783179318031813182318331843185318631873188318931903191319231933194319531963197319831993200320132023203320432053206320732083209
  1. /*
  2. * MIPS emulation helpers for qemu.
  3. *
  4. * Copyright (c) 2004-2005 Jocelyn Mayer
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #include <stdlib.h>
  20. #include "cpu.h"
  21. #include "qemu/host-utils.h"
  22. #include "helper.h"
  23. #if !defined(CONFIG_USER_ONLY)
  24. #include "exec/softmmu_exec.h"
  25. #endif /* !defined(CONFIG_USER_ONLY) */
  26. #ifndef CONFIG_USER_ONLY
  27. static inline void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global);
  28. #endif
  29. /*****************************************************************************/
  30. /* Exceptions processing helpers */
  31. static inline void QEMU_NORETURN do_raise_exception_err(CPUMIPSState *env,
  32. uint32_t exception,
  33. int error_code,
  34. uintptr_t pc)
  35. {
  36. if (exception < EXCP_SC) {
  37. qemu_log("%s: %d %d\n", __func__, exception, error_code);
  38. }
  39. env->exception_index = exception;
  40. env->error_code = error_code;
  41. if (pc) {
  42. /* now we have a real cpu fault */
  43. cpu_restore_state(env, pc);
  44. }
  45. cpu_loop_exit(env);
  46. }
  47. static inline void QEMU_NORETURN do_raise_exception(CPUMIPSState *env,
  48. uint32_t exception,
  49. uintptr_t pc)
  50. {
  51. do_raise_exception_err(env, exception, 0, pc);
  52. }
  53. void helper_raise_exception_err(CPUMIPSState *env, uint32_t exception,
  54. int error_code)
  55. {
  56. do_raise_exception_err(env, exception, error_code, 0);
  57. }
  58. void helper_raise_exception(CPUMIPSState *env, uint32_t exception)
  59. {
  60. do_raise_exception(env, exception, 0);
  61. }
#if defined(CONFIG_USER_ONLY)
/* User mode: loads go straight to the "raw" accessors; mem_idx is unused.  */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
                             int mem_idx)                               \
{                                                                       \
    return (type) insn##_raw(addr);                                     \
}
#else
/* System mode: dispatch on mem_idx to the kernel/supervisor/user
   softmmu accessor.  mem_idx 2 (user) doubles as the default case.  */
#define HELPER_LD(name, insn, type)                                     \
static inline type do_##name(CPUMIPSState *env, target_ulong addr,      \
                             int mem_idx)                               \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: return (type) cpu_##insn##_kernel(env, addr); break;        \
    case 1: return (type) cpu_##insn##_super(env, addr); break;         \
    default:                                                            \
    case 2: return (type) cpu_##insn##_user(env, addr); break;          \
    }                                                                   \
}
#endif
HELPER_LD(lbu, ldub, uint8_t)
HELPER_LD(lw, ldl, int32_t)
#ifdef TARGET_MIPS64
HELPER_LD(ld, ldq, int64_t)
#endif
#undef HELPER_LD
#if defined(CONFIG_USER_ONLY)
/* User mode: stores go straight to the "raw" accessors; mem_idx is unused.  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
                             type val, int mem_idx)                     \
{                                                                       \
    insn##_raw(addr, val);                                              \
}
#else
/* System mode: dispatch on mem_idx to the kernel/supervisor/user
   softmmu accessor.  mem_idx 2 (user) doubles as the default case.  */
#define HELPER_ST(name, insn, type)                                     \
static inline void do_##name(CPUMIPSState *env, target_ulong addr,      \
                             type val, int mem_idx)                     \
{                                                                       \
    switch (mem_idx)                                                    \
    {                                                                   \
    case 0: cpu_##insn##_kernel(env, addr, val); break;                 \
    case 1: cpu_##insn##_super(env, addr, val); break;                  \
    default:                                                            \
    case 2: cpu_##insn##_user(env, addr, val); break;                   \
    }                                                                   \
}
#endif
HELPER_ST(sb, stb, uint8_t)
HELPER_ST(sw, stl, uint32_t)
#ifdef TARGET_MIPS64
HELPER_ST(sd, stq, uint64_t)
#endif
#undef HELPER_ST
  116. target_ulong helper_clo (target_ulong arg1)
  117. {
  118. return clo32(arg1);
  119. }
  120. target_ulong helper_clz (target_ulong arg1)
  121. {
  122. return clz32(arg1);
  123. }
  124. #if defined(TARGET_MIPS64)
  125. target_ulong helper_dclo (target_ulong arg1)
  126. {
  127. return clo64(arg1);
  128. }
  129. target_ulong helper_dclz (target_ulong arg1)
  130. {
  131. return clz64(arg1);
  132. }
  133. #endif /* TARGET_MIPS64 */
  134. /* 64 bits arithmetic for 32 bits hosts */
  135. static inline uint64_t get_HILO(CPUMIPSState *env)
  136. {
  137. return ((uint64_t)(env->active_tc.HI[0]) << 32) | (uint32_t)env->active_tc.LO[0];
  138. }
  139. static inline target_ulong set_HIT0_LO(CPUMIPSState *env, uint64_t HILO)
  140. {
  141. target_ulong tmp;
  142. env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
  143. tmp = env->active_tc.HI[0] = (int32_t)(HILO >> 32);
  144. return tmp;
  145. }
  146. static inline target_ulong set_HI_LOT0(CPUMIPSState *env, uint64_t HILO)
  147. {
  148. target_ulong tmp = env->active_tc.LO[0] = (int32_t)(HILO & 0xFFFFFFFF);
  149. env->active_tc.HI[0] = (int32_t)(HILO >> 32);
  150. return tmp;
  151. }
  152. /* Multiplication variants of the vr54xx. */
  153. target_ulong helper_muls(CPUMIPSState *env, target_ulong arg1,
  154. target_ulong arg2)
  155. {
  156. return set_HI_LOT0(env, 0 - ((int64_t)(int32_t)arg1 *
  157. (int64_t)(int32_t)arg2));
  158. }
  159. target_ulong helper_mulsu(CPUMIPSState *env, target_ulong arg1,
  160. target_ulong arg2)
  161. {
  162. return set_HI_LOT0(env, 0 - (uint64_t)(uint32_t)arg1 *
  163. (uint64_t)(uint32_t)arg2);
  164. }
  165. target_ulong helper_macc(CPUMIPSState *env, target_ulong arg1,
  166. target_ulong arg2)
  167. {
  168. return set_HI_LOT0(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
  169. (int64_t)(int32_t)arg2);
  170. }
  171. target_ulong helper_macchi(CPUMIPSState *env, target_ulong arg1,
  172. target_ulong arg2)
  173. {
  174. return set_HIT0_LO(env, (int64_t)get_HILO(env) + (int64_t)(int32_t)arg1 *
  175. (int64_t)(int32_t)arg2);
  176. }
  177. target_ulong helper_maccu(CPUMIPSState *env, target_ulong arg1,
  178. target_ulong arg2)
  179. {
  180. return set_HI_LOT0(env, (uint64_t)get_HILO(env) +
  181. (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
  182. }
  183. target_ulong helper_macchiu(CPUMIPSState *env, target_ulong arg1,
  184. target_ulong arg2)
  185. {
  186. return set_HIT0_LO(env, (uint64_t)get_HILO(env) +
  187. (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
  188. }
  189. target_ulong helper_msac(CPUMIPSState *env, target_ulong arg1,
  190. target_ulong arg2)
  191. {
  192. return set_HI_LOT0(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
  193. (int64_t)(int32_t)arg2);
  194. }
  195. target_ulong helper_msachi(CPUMIPSState *env, target_ulong arg1,
  196. target_ulong arg2)
  197. {
  198. return set_HIT0_LO(env, (int64_t)get_HILO(env) - (int64_t)(int32_t)arg1 *
  199. (int64_t)(int32_t)arg2);
  200. }
  201. target_ulong helper_msacu(CPUMIPSState *env, target_ulong arg1,
  202. target_ulong arg2)
  203. {
  204. return set_HI_LOT0(env, (uint64_t)get_HILO(env) -
  205. (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
  206. }
  207. target_ulong helper_msachiu(CPUMIPSState *env, target_ulong arg1,
  208. target_ulong arg2)
  209. {
  210. return set_HIT0_LO(env, (uint64_t)get_HILO(env) -
  211. (uint64_t)(uint32_t)arg1 * (uint64_t)(uint32_t)arg2);
  212. }
  213. target_ulong helper_mulhi(CPUMIPSState *env, target_ulong arg1,
  214. target_ulong arg2)
  215. {
  216. return set_HIT0_LO(env, (int64_t)(int32_t)arg1 * (int64_t)(int32_t)arg2);
  217. }
  218. target_ulong helper_mulhiu(CPUMIPSState *env, target_ulong arg1,
  219. target_ulong arg2)
  220. {
  221. return set_HIT0_LO(env, (uint64_t)(uint32_t)arg1 *
  222. (uint64_t)(uint32_t)arg2);
  223. }
  224. target_ulong helper_mulshi(CPUMIPSState *env, target_ulong arg1,
  225. target_ulong arg2)
  226. {
  227. return set_HIT0_LO(env, 0 - (int64_t)(int32_t)arg1 *
  228. (int64_t)(int32_t)arg2);
  229. }
  230. target_ulong helper_mulshiu(CPUMIPSState *env, target_ulong arg1,
  231. target_ulong arg2)
  232. {
  233. return set_HIT0_LO(env, 0 - (uint64_t)(uint32_t)arg1 *
  234. (uint64_t)(uint32_t)arg2);
  235. }
  236. #ifdef TARGET_MIPS64
  237. void helper_dmult(CPUMIPSState *env, target_ulong arg1,
  238. target_ulong arg2, int acc)
  239. {
  240. muls64(&(env->active_tc.LO[acc]), &(env->active_tc.HI[acc]), arg1, arg2);
  241. }
  242. void helper_dmultu(CPUMIPSState *env, target_ulong arg1,
  243. target_ulong arg2, int acc)
  244. {
  245. mulu64(&(env->active_tc.LO[acc]), &(env->active_tc.HI[acc]), arg1, arg2);
  246. }
  247. #endif
  248. #ifndef CONFIG_USER_ONLY
  249. static inline hwaddr do_translate_address(CPUMIPSState *env,
  250. target_ulong address,
  251. int rw)
  252. {
  253. hwaddr lladdr;
  254. lladdr = cpu_mips_translate_address(env, address, rw);
  255. if (lladdr == -1LL) {
  256. cpu_loop_exit(env);
  257. } else {
  258. return lladdr;
  259. }
  260. }
/* LL/LLD: record the translated lock address and the loaded value so a
   later SC/SCD can detect interference, then return the loaded value.  */
#define HELPER_LD_ATOMIC(name, insn)                                          \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg, int mem_idx)  \
{                                                                             \
    env->lladdr = do_translate_address(env, arg, 0);                          \
    env->llval = do_##insn(env, arg, mem_idx);                                \
    return env->llval;                                                        \
}
HELPER_LD_ATOMIC(ll, lw)
#ifdef TARGET_MIPS64
HELPER_LD_ATOMIC(lld, ld)
#endif
#undef HELPER_LD_ATOMIC

/* SC/SCD: raise AdES on a misaligned address (almask); succeed — store
   and return 1 — only when the address matches the active link and the
   memory still holds the value loaded by LL/LLD; otherwise return 0.  */
#define HELPER_ST_ATOMIC(name, ld_insn, st_insn, almask)                      \
target_ulong helper_##name(CPUMIPSState *env, target_ulong arg1,              \
                           target_ulong arg2, int mem_idx)                    \
{                                                                             \
    target_long tmp;                                                          \
                                                                              \
    if (arg2 & almask) {                                                      \
        env->CP0_BadVAddr = arg2;                                             \
        helper_raise_exception(env, EXCP_AdES);                               \
    }                                                                         \
    if (do_translate_address(env, arg2, 1) == env->lladdr) {                  \
        tmp = do_##ld_insn(env, arg2, mem_idx);                               \
        if (tmp == env->llval) {                                              \
            do_##st_insn(env, arg2, arg1, mem_idx);                           \
            return 1;                                                         \
        }                                                                     \
    }                                                                         \
    return 0;                                                                 \
}
HELPER_ST_ATOMIC(sc, lw, sw, 0x3)
#ifdef TARGET_MIPS64
HELPER_ST_ATOMIC(scd, ld, sd, 0x7)
#endif
#undef HELPER_ST_ATOMIC
  297. #endif
#ifdef TARGET_WORDS_BIGENDIAN
/* Byte lane of an address within a word: lane 0 holds the MSB.  */
#define GET_LMASK(v) ((v) & 3)
/* Unaligned helpers walk towards higher addresses on big-endian.  */
#define GET_OFFSET(addr, offset) (addr + (offset))
#else
/* Little-endian: invert the lane index so lane 0 is the MSB again.  */
#define GET_LMASK(v) (((v) & 3) ^ 3)
#define GET_OFFSET(addr, offset) (addr - (offset))
#endif
  305. void helper_swl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
  306. int mem_idx)
  307. {
  308. do_sb(env, arg2, (uint8_t)(arg1 >> 24), mem_idx);
  309. if (GET_LMASK(arg2) <= 2)
  310. do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 16), mem_idx);
  311. if (GET_LMASK(arg2) <= 1)
  312. do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 8), mem_idx);
  313. if (GET_LMASK(arg2) == 0)
  314. do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)arg1, mem_idx);
  315. }
  316. void helper_swr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
  317. int mem_idx)
  318. {
  319. do_sb(env, arg2, (uint8_t)arg1, mem_idx);
  320. if (GET_LMASK(arg2) >= 1)
  321. do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
  322. if (GET_LMASK(arg2) >= 2)
  323. do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
  324. if (GET_LMASK(arg2) == 3)
  325. do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
  326. }
#if defined(TARGET_MIPS64)
/* "half" load and stores. We must do the memory access inline,
   or fault handling won't work. */
#ifdef TARGET_WORDS_BIGENDIAN
/* Byte lane of an address within a doubleword: lane 0 holds the MSB.  */
#define GET_LMASK64(v) ((v) & 7)
#else
/* Little-endian: invert the lane index so lane 0 is the MSB again.  */
#define GET_LMASK64(v) (((v) & 7) ^ 7)
#endif
  335. void helper_sdl(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
  336. int mem_idx)
  337. {
  338. do_sb(env, arg2, (uint8_t)(arg1 >> 56), mem_idx);
  339. if (GET_LMASK64(arg2) <= 6)
  340. do_sb(env, GET_OFFSET(arg2, 1), (uint8_t)(arg1 >> 48), mem_idx);
  341. if (GET_LMASK64(arg2) <= 5)
  342. do_sb(env, GET_OFFSET(arg2, 2), (uint8_t)(arg1 >> 40), mem_idx);
  343. if (GET_LMASK64(arg2) <= 4)
  344. do_sb(env, GET_OFFSET(arg2, 3), (uint8_t)(arg1 >> 32), mem_idx);
  345. if (GET_LMASK64(arg2) <= 3)
  346. do_sb(env, GET_OFFSET(arg2, 4), (uint8_t)(arg1 >> 24), mem_idx);
  347. if (GET_LMASK64(arg2) <= 2)
  348. do_sb(env, GET_OFFSET(arg2, 5), (uint8_t)(arg1 >> 16), mem_idx);
  349. if (GET_LMASK64(arg2) <= 1)
  350. do_sb(env, GET_OFFSET(arg2, 6), (uint8_t)(arg1 >> 8), mem_idx);
  351. if (GET_LMASK64(arg2) <= 0)
  352. do_sb(env, GET_OFFSET(arg2, 7), (uint8_t)arg1, mem_idx);
  353. }
  354. void helper_sdr(CPUMIPSState *env, target_ulong arg1, target_ulong arg2,
  355. int mem_idx)
  356. {
  357. do_sb(env, arg2, (uint8_t)arg1, mem_idx);
  358. if (GET_LMASK64(arg2) >= 1)
  359. do_sb(env, GET_OFFSET(arg2, -1), (uint8_t)(arg1 >> 8), mem_idx);
  360. if (GET_LMASK64(arg2) >= 2)
  361. do_sb(env, GET_OFFSET(arg2, -2), (uint8_t)(arg1 >> 16), mem_idx);
  362. if (GET_LMASK64(arg2) >= 3)
  363. do_sb(env, GET_OFFSET(arg2, -3), (uint8_t)(arg1 >> 24), mem_idx);
  364. if (GET_LMASK64(arg2) >= 4)
  365. do_sb(env, GET_OFFSET(arg2, -4), (uint8_t)(arg1 >> 32), mem_idx);
  366. if (GET_LMASK64(arg2) >= 5)
  367. do_sb(env, GET_OFFSET(arg2, -5), (uint8_t)(arg1 >> 40), mem_idx);
  368. if (GET_LMASK64(arg2) >= 6)
  369. do_sb(env, GET_OFFSET(arg2, -6), (uint8_t)(arg1 >> 48), mem_idx);
  370. if (GET_LMASK64(arg2) == 7)
  371. do_sb(env, GET_OFFSET(arg2, -7), (uint8_t)(arg1 >> 56), mem_idx);
  372. }
  373. #endif /* TARGET_MIPS64 */
/* GPR numbers (s0-s7, s8/fp) addressed by the LWM/SWM/LDM/SDM
   register-list encodings below.  */
static const int multiple_regs[] = { 16, 17, 18, 19, 20, 21, 22, 23, 30 };
  375. void helper_lwm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
  376. uint32_t mem_idx)
  377. {
  378. target_ulong base_reglist = reglist & 0xf;
  379. target_ulong do_r31 = reglist & 0x10;
  380. if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
  381. target_ulong i;
  382. for (i = 0; i < base_reglist; i++) {
  383. env->active_tc.gpr[multiple_regs[i]] =
  384. (target_long)do_lw(env, addr, mem_idx);
  385. addr += 4;
  386. }
  387. }
  388. if (do_r31) {
  389. env->active_tc.gpr[31] = (target_long)do_lw(env, addr, mem_idx);
  390. }
  391. }
  392. void helper_swm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
  393. uint32_t mem_idx)
  394. {
  395. target_ulong base_reglist = reglist & 0xf;
  396. target_ulong do_r31 = reglist & 0x10;
  397. if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
  398. target_ulong i;
  399. for (i = 0; i < base_reglist; i++) {
  400. do_sw(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx);
  401. addr += 4;
  402. }
  403. }
  404. if (do_r31) {
  405. do_sw(env, addr, env->active_tc.gpr[31], mem_idx);
  406. }
  407. }
  408. #if defined(TARGET_MIPS64)
  409. void helper_ldm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
  410. uint32_t mem_idx)
  411. {
  412. target_ulong base_reglist = reglist & 0xf;
  413. target_ulong do_r31 = reglist & 0x10;
  414. if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
  415. target_ulong i;
  416. for (i = 0; i < base_reglist; i++) {
  417. env->active_tc.gpr[multiple_regs[i]] = do_ld(env, addr, mem_idx);
  418. addr += 8;
  419. }
  420. }
  421. if (do_r31) {
  422. env->active_tc.gpr[31] = do_ld(env, addr, mem_idx);
  423. }
  424. }
  425. void helper_sdm(CPUMIPSState *env, target_ulong addr, target_ulong reglist,
  426. uint32_t mem_idx)
  427. {
  428. target_ulong base_reglist = reglist & 0xf;
  429. target_ulong do_r31 = reglist & 0x10;
  430. if (base_reglist > 0 && base_reglist <= ARRAY_SIZE (multiple_regs)) {
  431. target_ulong i;
  432. for (i = 0; i < base_reglist; i++) {
  433. do_sd(env, addr, env->active_tc.gpr[multiple_regs[i]], mem_idx);
  434. addr += 8;
  435. }
  436. }
  437. if (do_r31) {
  438. do_sd(env, addr, env->active_tc.gpr[31], mem_idx);
  439. }
  440. }
  441. #endif
  442. #ifndef CONFIG_USER_ONLY
/* SMP helpers. */

/* True when the VPE is halted but otherwise active — i.e. it is merely
   waiting for an interrupt, not shut off.  */
static bool mips_vpe_is_wfi(MIPSCPU *c)
{
    CPUMIPSState *env = &c->env;

    /* If the VPE is halted but otherwise active, it means it's waiting for
       an interrupt. */
    return env->halted && mips_vpe_active(env);
}

/* Request a wake-up of the VPE via the interrupt machinery.  */
static inline void mips_vpe_wake(CPUMIPSState *c)
{
    /* Don't set ->halted = 0 directly; let it be done via cpu_has_work
       because there might be other conditions that state that c should
       be sleeping. */
    cpu_interrupt(c, CPU_INTERRUPT_WAKE);
}

/* Put the VPE to sleep and cancel any pending wake request.  */
static inline void mips_vpe_sleep(MIPSCPU *cpu)
{
    CPUMIPSState *c = &cpu->env;

    /* The VPE was shut off, really go to bed.
       Reset any old _WAKE requests. */
    c->halted = 1;
    cpu_reset_interrupt(c, CPU_INTERRUPT_WAKE);
}

/* A TC became runnable: wake its VPE if the VPE is active and not just
   waiting for an interrupt.  */
static inline void mips_tc_wake(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (mips_vpe_active(c) && !mips_vpe_is_wfi(cpu)) {
        mips_vpe_wake(c);
    }
}

/* A TC was halted: put the VPE to sleep if it is no longer active.  */
static inline void mips_tc_sleep(MIPSCPU *cpu, int tc)
{
    CPUMIPSState *c = &cpu->env;

    /* FIXME: TC reschedule. */
    if (!mips_vpe_active(c)) {
        mips_vpe_sleep(cpu);
    }
}
  482. /**
  483. * mips_cpu_map_tc:
  484. * @env: CPU from which mapping is performed.
  485. * @tc: Should point to an int with the value of the global TC index.
  486. *
  487. * This function will transform @tc into a local index within the
  488. * returned #CPUMIPSState.
  489. */
  490. /* FIXME: This code assumes that all VPEs have the same number of TCs,
  491. which depends on runtime setup. Can probably be fixed by
  492. walking the list of CPUMIPSStates. */
  493. static CPUMIPSState *mips_cpu_map_tc(CPUMIPSState *env, int *tc)
  494. {
  495. MIPSCPU *cpu;
  496. CPUState *cs;
  497. CPUState *other_cs;
  498. int vpe_idx;
  499. int tc_idx = *tc;
  500. if (!(env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))) {
  501. /* Not allowed to address other CPUs. */
  502. *tc = env->current_tc;
  503. return env;
  504. }
  505. cs = CPU(mips_env_get_cpu(env));
  506. vpe_idx = tc_idx / cs->nr_threads;
  507. *tc = tc_idx % cs->nr_threads;
  508. other_cs = qemu_get_cpu(vpe_idx);
  509. if (other_cs == NULL) {
  510. return env;
  511. }
  512. cpu = MIPS_CPU(other_cs);
  513. return &cpu->env;
  514. }
  515. /* The per VPE CP0_Status register shares some fields with the per TC
  516. CP0_TCStatus registers. These fields are wired to the same registers,
  517. so changes to either of them should be reflected on both registers.
  518. Also, EntryHi shares the bottom 8 bit ASID with TCStauts.
  519. These helper call synchronizes the regs for a given cpu. */
  520. /* Called for updates to CP0_Status. */
  521. static void sync_c0_status(CPUMIPSState *env, CPUMIPSState *cpu, int tc)
  522. {
  523. int32_t tcstatus, *tcst;
  524. uint32_t v = cpu->CP0_Status;
  525. uint32_t cu, mx, asid, ksu;
  526. uint32_t mask = ((1 << CP0TCSt_TCU3)
  527. | (1 << CP0TCSt_TCU2)
  528. | (1 << CP0TCSt_TCU1)
  529. | (1 << CP0TCSt_TCU0)
  530. | (1 << CP0TCSt_TMX)
  531. | (3 << CP0TCSt_TKSU)
  532. | (0xff << CP0TCSt_TASID));
  533. cu = (v >> CP0St_CU0) & 0xf;
  534. mx = (v >> CP0St_MX) & 0x1;
  535. ksu = (v >> CP0St_KSU) & 0x3;
  536. asid = env->CP0_EntryHi & 0xff;
  537. tcstatus = cu << CP0TCSt_TCU0;
  538. tcstatus |= mx << CP0TCSt_TMX;
  539. tcstatus |= ksu << CP0TCSt_TKSU;
  540. tcstatus |= asid;
  541. if (tc == cpu->current_tc) {
  542. tcst = &cpu->active_tc.CP0_TCStatus;
  543. } else {
  544. tcst = &cpu->tcs[tc].CP0_TCStatus;
  545. }
  546. *tcst &= ~mask;
  547. *tcst |= tcstatus;
  548. compute_hflags(cpu);
  549. }
  550. /* Called for updates to CP0_TCStatus. */
  551. static void sync_c0_tcstatus(CPUMIPSState *cpu, int tc,
  552. target_ulong v)
  553. {
  554. uint32_t status;
  555. uint32_t tcu, tmx, tasid, tksu;
  556. uint32_t mask = ((1 << CP0St_CU3)
  557. | (1 << CP0St_CU2)
  558. | (1 << CP0St_CU1)
  559. | (1 << CP0St_CU0)
  560. | (1 << CP0St_MX)
  561. | (3 << CP0St_KSU));
  562. tcu = (v >> CP0TCSt_TCU0) & 0xf;
  563. tmx = (v >> CP0TCSt_TMX) & 0x1;
  564. tasid = v & 0xff;
  565. tksu = (v >> CP0TCSt_TKSU) & 0x3;
  566. status = tcu << CP0St_CU0;
  567. status |= tmx << CP0St_MX;
  568. status |= tksu << CP0St_KSU;
  569. cpu->CP0_Status &= ~mask;
  570. cpu->CP0_Status |= status;
  571. /* Sync the TASID with EntryHi. */
  572. cpu->CP0_EntryHi &= ~0xff;
  573. cpu->CP0_EntryHi = tasid;
  574. compute_hflags(cpu);
  575. }
  576. /* Called for updates to CP0_EntryHi. */
  577. static void sync_c0_entryhi(CPUMIPSState *cpu, int tc)
  578. {
  579. int32_t *tcst;
  580. uint32_t asid, v = cpu->CP0_EntryHi;
  581. asid = v & 0xff;
  582. if (tc == cpu->current_tc) {
  583. tcst = &cpu->active_tc.CP0_TCStatus;
  584. } else {
  585. tcst = &cpu->tcs[tc].CP0_TCStatus;
  586. }
  587. *tcst &= ~0xff;
  588. *tcst |= asid;
  589. }
  590. /* CP0 helpers */
/* mfc0 CP0_MVPControl: the MVP registers are shared, held in env->mvp. */
target_ulong helper_mfc0_mvpcontrol(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPControl;
}
/* mfc0 CP0_MVPConf0 (shared MVP configuration, read-only to guests). */
target_ulong helper_mfc0_mvpconf0(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf0;
}
/* mfc0 CP0_MVPConf1 (shared MVP configuration, read-only to guests). */
target_ulong helper_mfc0_mvpconf1(CPUMIPSState *env)
{
    return env->mvp->CP0_MVPConf1;
}
/* mfc0 CP0_Random: pseudo-random TLB index, sign-extended to target_ulong. */
target_ulong helper_mfc0_random(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_random(env);
}
/* mfc0 CP0_TCStatus of the currently running TC. */
target_ulong helper_mfc0_tcstatus(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCStatus;
}
  611. target_ulong helper_mftc0_tcstatus(CPUMIPSState *env)
  612. {
  613. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  614. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  615. if (other_tc == other->current_tc)
  616. return other->active_tc.CP0_TCStatus;
  617. else
  618. return other->tcs[other_tc].CP0_TCStatus;
  619. }
/* mfc0 CP0_TCBind of the currently running TC. */
target_ulong helper_mfc0_tcbind(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCBind;
}
  624. target_ulong helper_mftc0_tcbind(CPUMIPSState *env)
  625. {
  626. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  627. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  628. if (other_tc == other->current_tc)
  629. return other->active_tc.CP0_TCBind;
  630. else
  631. return other->tcs[other_tc].CP0_TCBind;
  632. }
/* mfc0 CP0_TCRestart: the restart address is the TC's PC. */
target_ulong helper_mfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}
  637. target_ulong helper_mftc0_tcrestart(CPUMIPSState *env)
  638. {
  639. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  640. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  641. if (other_tc == other->current_tc)
  642. return other->active_tc.PC;
  643. else
  644. return other->tcs[other_tc].PC;
  645. }
/* mfc0 CP0_TCHalt of the currently running TC. */
target_ulong helper_mfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}
  650. target_ulong helper_mftc0_tchalt(CPUMIPSState *env)
  651. {
  652. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  653. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  654. if (other_tc == other->current_tc)
  655. return other->active_tc.CP0_TCHalt;
  656. else
  657. return other->tcs[other_tc].CP0_TCHalt;
  658. }
/* mfc0 CP0_TCContext of the currently running TC. */
target_ulong helper_mfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}
  663. target_ulong helper_mftc0_tccontext(CPUMIPSState *env)
  664. {
  665. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  666. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  667. if (other_tc == other->current_tc)
  668. return other->active_tc.CP0_TCContext;
  669. else
  670. return other->tcs[other_tc].CP0_TCContext;
  671. }
/* mfc0 CP0_TCSchedule of the currently running TC. */
target_ulong helper_mfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}
  676. target_ulong helper_mftc0_tcschedule(CPUMIPSState *env)
  677. {
  678. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  679. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  680. if (other_tc == other->current_tc)
  681. return other->active_tc.CP0_TCSchedule;
  682. else
  683. return other->tcs[other_tc].CP0_TCSchedule;
  684. }
/* mfc0 CP0_TCScheFBack of the currently running TC. */
target_ulong helper_mfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}
  689. target_ulong helper_mftc0_tcschefback(CPUMIPSState *env)
  690. {
  691. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  692. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  693. if (other_tc == other->current_tc)
  694. return other->active_tc.CP0_TCScheFBack;
  695. else
  696. return other->tcs[other_tc].CP0_TCScheFBack;
  697. }
/* mfc0 CP0_Count: timer-backed counter, sign-extended to target_ulong. */
target_ulong helper_mfc0_count(CPUMIPSState *env)
{
    return (int32_t)cpu_mips_get_count(env);
}
  702. target_ulong helper_mftc0_entryhi(CPUMIPSState *env)
  703. {
  704. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  705. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  706. return other->CP0_EntryHi;
  707. }
/* mftc0 CP0_Cause: read the Cause register of the targeted TC's VPE.
 * NOTE(review): both branches read the same per-VPE register -- Cause has
 * no per-TC copy, so the current/other distinction is vacuous here. */
target_ulong helper_mftc0_cause(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tccause;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc) {
        tccause = other->CP0_Cause;
    } else {
        tccause = other->CP0_Cause;
    }

    return tccause;
}
  720. target_ulong helper_mftc0_status(CPUMIPSState *env)
  721. {
  722. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  723. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  724. return other->CP0_Status;
  725. }
/* mfc0 CP0_LLAddr: LL address, scaled down by the core's LLAddr shift. */
target_ulong helper_mfc0_lladdr(CPUMIPSState *env)
{
    return (int32_t)(env->lladdr >> env->CP0_LLAddr_shift);
}
/* mfc0 CP0_WatchLo[sel], sign-extended to target_ulong. */
target_ulong helper_mfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return (int32_t)env->CP0_WatchLo[sel];
}
/* mfc0 CP0_WatchHi[sel]. */
target_ulong helper_mfc0_watchhi(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchHi[sel];
}
  738. target_ulong helper_mfc0_debug(CPUMIPSState *env)
  739. {
  740. target_ulong t0 = env->CP0_Debug;
  741. if (env->hflags & MIPS_HFLAG_DM)
  742. t0 |= 1 << CP0DB_DM;
  743. return t0;
  744. }
/* mftc0 CP0_Debug: combine the VPE-wide Debug register with the targeted
 * TC's per-TC SSt/Halt bits. */
target_ulong helper_mftc0_debug(CPUMIPSState *env)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    int32_t tcstatus;
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        tcstatus = other->active_tc.CP0_Debug_tcstatus;
    else
        tcstatus = other->tcs[other_tc].CP0_Debug_tcstatus;

    /* XXX: Might be wrong, check with EJTAG spec. */
    return (other->CP0_Debug & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
           (tcstatus & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
#if defined(TARGET_MIPS64)
/* 64-bit (dmfc0) variants: same sources as the 32-bit reads above, but
   without truncation/sign-extension to 32 bits. */
target_ulong helper_dmfc0_tcrestart(CPUMIPSState *env)
{
    return env->active_tc.PC;
}

target_ulong helper_dmfc0_tchalt(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCHalt;
}

target_ulong helper_dmfc0_tccontext(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCContext;
}

target_ulong helper_dmfc0_tcschedule(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCSchedule;
}

target_ulong helper_dmfc0_tcschefback(CPUMIPSState *env)
{
    return env->active_tc.CP0_TCScheFBack;
}

/* Full-width LL address, scaled by the core's LLAddr shift. */
target_ulong helper_dmfc0_lladdr(CPUMIPSState *env)
{
    return env->lladdr >> env->CP0_LLAddr_shift;
}

target_ulong helper_dmfc0_watchlo(CPUMIPSState *env, uint32_t sel)
{
    return env->CP0_WatchLo[sel];
}
#endif /* TARGET_MIPS64 */
/* mtc0 CP0_Index: keep the probe-failure bit (31), mask the written index.
 * 'num' ends up as a power of two large enough that (num - 1) covers every
 * valid TLB index for nb_tlb entries. */
void helper_mtc0_index(CPUMIPSState *env, target_ulong arg1)
{
    int num = 1;
    unsigned int tmp = env->tlb->nb_tlb;

    do {
        tmp >>= 1;
        num <<= 1;
    } while (tmp);
    env->CP0_Index = (env->CP0_Index & 0x80000000) | (arg1 & (num - 1));
}
  798. void helper_mtc0_mvpcontrol(CPUMIPSState *env, target_ulong arg1)
  799. {
  800. uint32_t mask = 0;
  801. uint32_t newval;
  802. if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP))
  803. mask |= (1 << CP0MVPCo_CPA) | (1 << CP0MVPCo_VPC) |
  804. (1 << CP0MVPCo_EVP);
  805. if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
  806. mask |= (1 << CP0MVPCo_STLB);
  807. newval = (env->mvp->CP0_MVPControl & ~mask) | (arg1 & mask);
  808. // TODO: Enable/disable shared TLB, enable/disable VPEs.
  809. env->mvp->CP0_MVPControl = newval;
  810. }
  811. void helper_mtc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
  812. {
  813. uint32_t mask;
  814. uint32_t newval;
  815. mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
  816. (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
  817. newval = (env->CP0_VPEControl & ~mask) | (arg1 & mask);
  818. /* Yield scheduler intercept not implemented. */
  819. /* Gating storage scheduler intercept not implemented. */
  820. // TODO: Enable/disable TCs.
  821. env->CP0_VPEControl = newval;
  822. }
  823. void helper_mttc0_vpecontrol(CPUMIPSState *env, target_ulong arg1)
  824. {
  825. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  826. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  827. uint32_t mask;
  828. uint32_t newval;
  829. mask = (1 << CP0VPECo_YSI) | (1 << CP0VPECo_GSI) |
  830. (1 << CP0VPECo_TE) | (0xff << CP0VPECo_TargTC);
  831. newval = (other->CP0_VPEControl & ~mask) | (arg1 & mask);
  832. /* TODO: Enable/disable TCs. */
  833. other->CP0_VPEControl = newval;
  834. }
  835. target_ulong helper_mftc0_vpecontrol(CPUMIPSState *env)
  836. {
  837. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  838. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  839. /* FIXME: Mask away return zero on read bits. */
  840. return other->CP0_VPEControl;
  841. }
  842. target_ulong helper_mftc0_vpeconf0(CPUMIPSState *env)
  843. {
  844. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  845. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  846. return other->CP0_VPEConf0;
  847. }
  848. void helper_mtc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
  849. {
  850. uint32_t mask = 0;
  851. uint32_t newval;
  852. if (env->CP0_VPEConf0 & (1 << CP0VPEC0_MVP)) {
  853. if (env->CP0_VPEConf0 & (1 << CP0VPEC0_VPA))
  854. mask |= (0xff << CP0VPEC0_XTC);
  855. mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
  856. }
  857. newval = (env->CP0_VPEConf0 & ~mask) | (arg1 & mask);
  858. // TODO: TC exclusive handling due to ERL/EXL.
  859. env->CP0_VPEConf0 = newval;
  860. }
  861. void helper_mttc0_vpeconf0(CPUMIPSState *env, target_ulong arg1)
  862. {
  863. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  864. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  865. uint32_t mask = 0;
  866. uint32_t newval;
  867. mask |= (1 << CP0VPEC0_MVP) | (1 << CP0VPEC0_VPA);
  868. newval = (other->CP0_VPEConf0 & ~mask) | (arg1 & mask);
  869. /* TODO: TC exclusive handling due to ERL/EXL. */
  870. other->CP0_VPEConf0 = newval;
  871. }
  872. void helper_mtc0_vpeconf1(CPUMIPSState *env, target_ulong arg1)
  873. {
  874. uint32_t mask = 0;
  875. uint32_t newval;
  876. if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
  877. mask |= (0xff << CP0VPEC1_NCX) | (0xff << CP0VPEC1_NCP2) |
  878. (0xff << CP0VPEC1_NCP1);
  879. newval = (env->CP0_VPEConf1 & ~mask) | (arg1 & mask);
  880. /* UDI not implemented. */
  881. /* CP2 not implemented. */
  882. // TODO: Handle FPU (CP1) binding.
  883. env->CP0_VPEConf1 = newval;
  884. }
/* mtc0 CP0_YQMask: yield qualifier inputs not implemented, so the written
 * value is deliberately discarded and the register pinned to zero. */
void helper_mtc0_yqmask(CPUMIPSState *env, target_ulong arg1)
{
    /* Yield qualifier inputs not implemented. */
    env->CP0_YQMask = 0x00000000;
}
/* mtc0 CP0_VPEOpt: only the low 16 bits are writable. */
void helper_mtc0_vpeopt(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_VPEOpt = arg1 & 0x0000ffff;
}
/* mtc0 CP0_EntryLo0: mask to the implemented PFN + flag bits. */
void helper_mtc0_entrylo0(CPUMIPSState *env, target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo0 = arg1 & 0x3FFFFFFF;
}
  900. void helper_mtc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
  901. {
  902. uint32_t mask = env->CP0_TCStatus_rw_bitmask;
  903. uint32_t newval;
  904. newval = (env->active_tc.CP0_TCStatus & ~mask) | (arg1 & mask);
  905. env->active_tc.CP0_TCStatus = newval;
  906. sync_c0_tcstatus(env, env->current_tc, newval);
  907. }
  908. void helper_mttc0_tcstatus(CPUMIPSState *env, target_ulong arg1)
  909. {
  910. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  911. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  912. if (other_tc == other->current_tc)
  913. other->active_tc.CP0_TCStatus = arg1;
  914. else
  915. other->tcs[other_tc].CP0_TCStatus = arg1;
  916. sync_c0_tcstatus(other, other_tc, arg1);
  917. }
  918. void helper_mtc0_tcbind(CPUMIPSState *env, target_ulong arg1)
  919. {
  920. uint32_t mask = (1 << CP0TCBd_TBE);
  921. uint32_t newval;
  922. if (env->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
  923. mask |= (1 << CP0TCBd_CurVPE);
  924. newval = (env->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
  925. env->active_tc.CP0_TCBind = newval;
  926. }
  927. void helper_mttc0_tcbind(CPUMIPSState *env, target_ulong arg1)
  928. {
  929. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  930. uint32_t mask = (1 << CP0TCBd_TBE);
  931. uint32_t newval;
  932. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  933. if (other->mvp->CP0_MVPControl & (1 << CP0MVPCo_VPC))
  934. mask |= (1 << CP0TCBd_CurVPE);
  935. if (other_tc == other->current_tc) {
  936. newval = (other->active_tc.CP0_TCBind & ~mask) | (arg1 & mask);
  937. other->active_tc.CP0_TCBind = newval;
  938. } else {
  939. newval = (other->tcs[other_tc].CP0_TCBind & ~mask) | (arg1 & mask);
  940. other->tcs[other_tc].CP0_TCBind = newval;
  941. }
  942. }
/* mtc0 CP0_TCRestart: redirect the running TC and clear its dirty
 * single-step state (TDS) and any pending LL reservation. */
void helper_mtc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.PC = arg1;
    env->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
    env->lladdr = 0ULL;
    /* MIPS16 not implemented. */
}
  950. void helper_mttc0_tcrestart(CPUMIPSState *env, target_ulong arg1)
  951. {
  952. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  953. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  954. if (other_tc == other->current_tc) {
  955. other->active_tc.PC = arg1;
  956. other->active_tc.CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
  957. other->lladdr = 0ULL;
  958. /* MIPS16 not implemented. */
  959. } else {
  960. other->tcs[other_tc].PC = arg1;
  961. other->tcs[other_tc].CP0_TCStatus &= ~(1 << CP0TCSt_TDS);
  962. other->lladdr = 0ULL;
  963. /* MIPS16 not implemented. */
  964. }
  965. }
/* mtc0 CP0_TCHalt: only bit 0 (H) is writable; setting it puts the
 * running TC to sleep, clearing it wakes it. */
void helper_mtc0_tchalt(CPUMIPSState *env, target_ulong arg1)
{
    MIPSCPU *cpu = mips_env_get_cpu(env);

    env->active_tc.CP0_TCHalt = arg1 & 0x1;

    // TODO: Halt TC / Restart (if allocated+active) TC.
    if (env->active_tc.CP0_TCHalt & 1) {
        mips_tc_sleep(cpu, env->current_tc);
    } else {
        mips_tc_wake(cpu, env->current_tc);
    }
}
  977. void helper_mttc0_tchalt(CPUMIPSState *env, target_ulong arg1)
  978. {
  979. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  980. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  981. MIPSCPU *other_cpu = mips_env_get_cpu(other);
  982. // TODO: Halt TC / Restart (if allocated+active) TC.
  983. if (other_tc == other->current_tc)
  984. other->active_tc.CP0_TCHalt = arg1;
  985. else
  986. other->tcs[other_tc].CP0_TCHalt = arg1;
  987. if (arg1 & 1) {
  988. mips_tc_sleep(other_cpu, other_tc);
  989. } else {
  990. mips_tc_wake(other_cpu, other_tc);
  991. }
  992. }
/* mtc0 CP0_TCContext of the running TC (software-defined scratch). */
void helper_mtc0_tccontext(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCContext = arg1;
}
  997. void helper_mttc0_tccontext(CPUMIPSState *env, target_ulong arg1)
  998. {
  999. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1000. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1001. if (other_tc == other->current_tc)
  1002. other->active_tc.CP0_TCContext = arg1;
  1003. else
  1004. other->tcs[other_tc].CP0_TCContext = arg1;
  1005. }
/* mtc0 CP0_TCSchedule of the running TC. */
void helper_mtc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCSchedule = arg1;
}
  1010. void helper_mttc0_tcschedule(CPUMIPSState *env, target_ulong arg1)
  1011. {
  1012. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1013. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1014. if (other_tc == other->current_tc)
  1015. other->active_tc.CP0_TCSchedule = arg1;
  1016. else
  1017. other->tcs[other_tc].CP0_TCSchedule = arg1;
  1018. }
/* mtc0 CP0_TCScheFBack of the running TC. */
void helper_mtc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
{
    env->active_tc.CP0_TCScheFBack = arg1;
}
  1023. void helper_mttc0_tcschefback(CPUMIPSState *env, target_ulong arg1)
  1024. {
  1025. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1026. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1027. if (other_tc == other->current_tc)
  1028. other->active_tc.CP0_TCScheFBack = arg1;
  1029. else
  1030. other->tcs[other_tc].CP0_TCScheFBack = arg1;
  1031. }
/* mtc0 CP0_EntryLo1: same masking as EntryLo0. */
void helper_mtc0_entrylo1(CPUMIPSState *env, target_ulong arg1)
{
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_EntryLo1 = arg1 & 0x3FFFFFFF;
}
/* mtc0 CP0_Context: low 23 bits (BadVPN2 area) are hardware-maintained;
 * only the PTE base above them is writable. */
void helper_mtc0_context(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Context = (env->CP0_Context & 0x007FFFFF) | (arg1 & ~0x007FFFFF);
}
/* mtc0 CP0_PageMask: restrict to supported page sizes. */
void helper_mtc0_pagemask(CPUMIPSState *env, target_ulong arg1)
{
    /* 1k pages not implemented */
    env->CP0_PageMask = arg1 & (0x1FFFFFFF & (TARGET_PAGE_MASK << 1));
}
/* mtc0 CP0_PageGrain: none of its features are implemented, so the write
 * is deliberately discarded and the register pinned to zero. */
void helper_mtc0_pagegrain(CPUMIPSState *env, target_ulong arg1)
{
    /* SmartMIPS not implemented */
    /* Large physaddr (PABITS) not implemented */
    /* 1k pages not implemented */
    env->CP0_PageGrain = 0;
}
/* mtc0 CP0_Wired: clamp to a valid TLB index. */
void helper_mtc0_wired(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Wired = arg1 % env->tlb->nb_tlb;
}
/* mtc0 CP0_SRSConf0: set-only through the rw bitmask (bits never clear). */
void helper_mtc0_srsconf0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf0 |= arg1 & env->CP0_SRSConf0_rw_bitmask;
}
/* mtc0 CP0_SRSConf1: set-only through the rw bitmask. */
void helper_mtc0_srsconf1(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf1 |= arg1 & env->CP0_SRSConf1_rw_bitmask;
}
/* mtc0 CP0_SRSConf2: set-only through the rw bitmask. */
void helper_mtc0_srsconf2(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf2 |= arg1 & env->CP0_SRSConf2_rw_bitmask;
}
/* mtc0 CP0_SRSConf3: set-only through the rw bitmask. */
void helper_mtc0_srsconf3(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf3 |= arg1 & env->CP0_SRSConf3_rw_bitmask;
}
/* mtc0 CP0_SRSConf4: set-only through the rw bitmask. */
void helper_mtc0_srsconf4(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_SRSConf4 |= arg1 & env->CP0_SRSConf4_rw_bitmask;
}
/* mtc0 CP0_HWREna: only the four standard rdhwr-enable bits writable. */
void helper_mtc0_hwrena(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_HWREna = arg1 & 0x0000000F;
}
/* mtc0 CP0_Count: delegate to the timer code, which rebases the counter. */
void helper_mtc0_count(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_count(env, arg1);
}
/* mtc0 CP0_EntryHi: store the masked VPN2+ASID, mirror the ASID into
 * TCStatus when the MT ASE is present, and flush the QEMU TLB if the
 * ASID changed (mappings are tagged per-ASID). */
void helper_mtc0_entryhi(CPUMIPSState *env, target_ulong arg1)
{
    target_ulong old, val;

    /* 1k pages not implemented */
    val = arg1 & ((TARGET_PAGE_MASK << 1) | 0xFF);
#if defined(TARGET_MIPS64)
    val &= env->SEGMask;
#endif
    old = env->CP0_EntryHi;
    env->CP0_EntryHi = val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_entryhi(env, env->current_tc);
    }
    /* If the ASID changes, flush qemu's TLB. */
    if ((old & 0xFF) != (val & 0xFF))
        cpu_mips_tlb_flush(env, 1);
}
  1103. void helper_mttc0_entryhi(CPUMIPSState *env, target_ulong arg1)
  1104. {
  1105. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1106. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1107. other->CP0_EntryHi = arg1;
  1108. sync_c0_entryhi(other, other_tc);
  1109. }
/* mtc0 CP0_Compare: delegate to the timer code (rearms the timer). */
void helper_mtc0_compare(CPUMIPSState *env, target_ulong arg1)
{
    cpu_mips_store_compare(env, arg1);
}
/* mtc0 CP0_Status: apply the core's writable-bit mask, re-sync the
 * MT-shared fields (or just recompute hflags), and optionally trace the
 * transition including the interrupt-pending overlap with Cause. */
void helper_mtc0_status(CPUMIPSState *env, target_ulong arg1)
{
    uint32_t val, old;
    uint32_t mask = env->CP0_Status_rw_bitmask;

    val = arg1 & mask;
    old = env->CP0_Status;
    env->CP0_Status = (env->CP0_Status & ~mask) | val;
    if (env->CP0_Config3 & (1 << CP0C3_MT)) {
        sync_c0_status(env, env, env->current_tc);
    } else {
        compute_hflags(env);
    }

    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("Status %08x (%08x) => %08x (%08x) Cause %08x",
                 old, old & env->CP0_Cause & CP0Ca_IP_mask,
                 val, val & env->CP0_Cause & CP0Ca_IP_mask,
                 env->CP0_Cause);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
  1139. void helper_mttc0_status(CPUMIPSState *env, target_ulong arg1)
  1140. {
  1141. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1142. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1143. other->CP0_Status = arg1 & ~0xf1000018;
  1144. sync_c0_status(env, other, other_tc);
  1145. }
/* mtc0 CP0_IntCtl: only the VS field (bits 9:5) is writable. */
void helper_mtc0_intctl(CPUMIPSState *env, target_ulong arg1)
{
    /* vectored interrupts not implemented, no performance counters. */
    env->CP0_IntCtl = (env->CP0_IntCtl & ~0x000003e0) | (arg1 & 0x000003e0);
}
  1151. void helper_mtc0_srsctl(CPUMIPSState *env, target_ulong arg1)
  1152. {
  1153. uint32_t mask = (0xf << CP0SRSCtl_ESS) | (0xf << CP0SRSCtl_PSS);
  1154. env->CP0_SRSCtl = (env->CP0_SRSCtl & ~mask) | (arg1 & mask);
  1155. }
/* Common body for (m)ttc0 writes to CP0_Cause: apply the writable-bit
 * mask (IV/WP/IP1:0, plus DC on R2 cores), start/stop the Count timer on
 * DC transitions, and raise/clear the two software interrupts when their
 * IP bits change. */
static void mtc0_cause(CPUMIPSState *cpu, target_ulong arg1)
{
    uint32_t mask = 0x00C00300;
    uint32_t old = cpu->CP0_Cause;
    int i;

    if (cpu->insn_flags & ISA_MIPS32R2) {
        mask |= 1 << CP0Ca_DC;
    }

    cpu->CP0_Cause = (cpu->CP0_Cause & ~mask) | (arg1 & mask);

    /* DC set disables the Count register; DC cleared re-enables it. */
    if ((old ^ cpu->CP0_Cause) & (1 << CP0Ca_DC)) {
        if (cpu->CP0_Cause & (1 << CP0Ca_DC)) {
            cpu_mips_stop_count(cpu);
        } else {
            cpu_mips_start_count(cpu);
        }
    }

    /* Set/reset software interrupts */
    for (i = 0 ; i < 2 ; i++) {
        if ((old ^ cpu->CP0_Cause) & (1 << (CP0Ca_IP + i))) {
            cpu_mips_soft_irq(cpu, i, cpu->CP0_Cause & (1 << (CP0Ca_IP + i)));
        }
    }
}
/* mtc0 CP0_Cause on the local VPE. */
void helper_mtc0_cause(CPUMIPSState *env, target_ulong arg1)
{
    mtc0_cause(env, arg1);
}
  1183. void helper_mttc0_cause(CPUMIPSState *env, target_ulong arg1)
  1184. {
  1185. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1186. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1187. mtc0_cause(other, arg1);
  1188. }
  1189. target_ulong helper_mftc0_epc(CPUMIPSState *env)
  1190. {
  1191. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1192. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1193. return other->CP0_EPC;
  1194. }
  1195. target_ulong helper_mftc0_ebase(CPUMIPSState *env)
  1196. {
  1197. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1198. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1199. return other->CP0_EBase;
  1200. }
/* mtc0 CP0_EBase: only the exception-base field (bits 29:12) writable. */
void helper_mtc0_ebase(CPUMIPSState *env, target_ulong arg1)
{
    /* vectored interrupts not implemented */
    env->CP0_EBase = (env->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
}
  1206. void helper_mttc0_ebase(CPUMIPSState *env, target_ulong arg1)
  1207. {
  1208. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1209. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1210. other->CP0_EBase = (other->CP0_EBase & ~0x3FFFF000) | (arg1 & 0x3FFFF000);
  1211. }
  1212. target_ulong helper_mftc0_configx(CPUMIPSState *env, target_ulong idx)
  1213. {
  1214. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1215. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1216. switch (idx) {
  1217. case 0: return other->CP0_Config0;
  1218. case 1: return other->CP0_Config1;
  1219. case 2: return other->CP0_Config2;
  1220. case 3: return other->CP0_Config3;
  1221. /* 4 and 5 are reserved. */
  1222. case 6: return other->CP0_Config6;
  1223. case 7: return other->CP0_Config7;
  1224. default:
  1225. break;
  1226. }
  1227. return 0;
  1228. }
/* mtc0 CP0_Config0: only the K0 cacheability field (bits 2:0) writable. */
void helper_mtc0_config0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Config0 = (env->CP0_Config0 & 0x81FFFFF8) | (arg1 & 0x00000007);
}
/* mtc0 CP0_Config2: no fields are writable here, so arg1 is deliberately
 * ignored and the register is simply re-masked. */
void helper_mtc0_config2(CPUMIPSState *env, target_ulong arg1)
{
    /* tertiary/secondary caches not implemented */
    env->CP0_Config2 = (env->CP0_Config2 & 0x8FFF0FFF);
}
  1238. void helper_mtc0_lladdr(CPUMIPSState *env, target_ulong arg1)
  1239. {
  1240. target_long mask = env->CP0_LLAddr_rw_bitmask;
  1241. arg1 = arg1 << env->CP0_LLAddr_shift;
  1242. env->lladdr = (env->lladdr & ~mask) | (arg1 & mask);
  1243. }
/* mtc0 CP0_WatchLo[sel]: the low 3 I/R/W enable bits are forced clear. */
void helper_mtc0_watchlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* Watch exceptions for instructions, data loads, data stores
       not implemented. */
    env->CP0_WatchLo[sel] = (arg1 & ~0x7);
}
/* mtc0 CP0_WatchHi[sel]: mask to the implemented fields.
 * NOTE(review): the second statement looks like a write-1-to-clear for
 * bits 2:0, but the 0x40FF0FF8 mask already zeroes those bits, making it
 * a no-op -- confirm intent. */
void helper_mtc0_watchhi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    env->CP0_WatchHi[sel] = (arg1 & 0x40FF0FF8);
    env->CP0_WatchHi[sel] &= ~(env->CP0_WatchHi[sel] & arg1 & 0x7);
}
  1255. void helper_mtc0_xcontext(CPUMIPSState *env, target_ulong arg1)
  1256. {
  1257. target_ulong mask = (1ULL << (env->SEGBITS - 7)) - 1;
  1258. env->CP0_XContext = (env->CP0_XContext & mask) | (arg1 & ~mask);
  1259. }
/* mtc0 CP0_Framemask: stored verbatim. */
void helper_mtc0_framemask(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Framemask = arg1; /* XXX */
}
  1264. void helper_mtc0_debug(CPUMIPSState *env, target_ulong arg1)
  1265. {
  1266. env->CP0_Debug = (env->CP0_Debug & 0x8C03FC1F) | (arg1 & 0x13300120);
  1267. if (arg1 & (1 << CP0DB_DM))
  1268. env->hflags |= MIPS_HFLAG_DM;
  1269. else
  1270. env->hflags &= ~MIPS_HFLAG_DM;
  1271. }
/* mttc0 CP0_Debug: route the per-TC SSt/Halt bits to the targeted TC's
 * Debug_tcstatus, and everything else to the VPE-wide Debug register. */
void helper_mttc0_debug(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    uint32_t val = arg1 & ((1 << CP0DB_SSt) | (1 << CP0DB_Halt));
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    /* XXX: Might be wrong, check with EJTAG spec. */
    if (other_tc == other->current_tc)
        other->active_tc.CP0_Debug_tcstatus = val;
    else
        other->tcs[other_tc].CP0_Debug_tcstatus = val;

    /* Keep the old per-TC bits in Debug itself; take the rest from arg1. */
    other->CP0_Debug = (other->CP0_Debug &
                        ((1 << CP0DB_SSt) | (1 << CP0DB_Halt))) |
                       (arg1 & ~((1 << CP0DB_SSt) | (1 << CP0DB_Halt)));
}
/* mtc0 CP0_Performance0: only the low 11 control bits are writable. */
void helper_mtc0_performance0(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_Performance0 = arg1 & 0x000007ff;
}
/* mtc0 CP0_TagLo: masked to the implemented tag bits. */
void helper_mtc0_taglo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagLo = arg1 & 0xFFFFFCF6;
}
/* mtc0 CP0_DataLo: stored verbatim. */
void helper_mtc0_datalo(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataLo = arg1; /* XXX */
}
/* mtc0 CP0_TagHi: stored verbatim. */
void helper_mtc0_taghi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_TagHi = arg1; /* XXX */
}
/* mtc0 CP0_DataHi: stored verbatim. */
void helper_mtc0_datahi(CPUMIPSState *env, target_ulong arg1)
{
    env->CP0_DataHi = arg1; /* XXX */
}
  1306. /* MIPS MT functions */
  1307. target_ulong helper_mftgpr(CPUMIPSState *env, uint32_t sel)
  1308. {
  1309. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1310. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1311. if (other_tc == other->current_tc)
  1312. return other->active_tc.gpr[sel];
  1313. else
  1314. return other->tcs[other_tc].gpr[sel];
  1315. }
  1316. target_ulong helper_mftlo(CPUMIPSState *env, uint32_t sel)
  1317. {
  1318. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1319. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1320. if (other_tc == other->current_tc)
  1321. return other->active_tc.LO[sel];
  1322. else
  1323. return other->tcs[other_tc].LO[sel];
  1324. }
  1325. target_ulong helper_mfthi(CPUMIPSState *env, uint32_t sel)
  1326. {
  1327. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1328. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1329. if (other_tc == other->current_tc)
  1330. return other->active_tc.HI[sel];
  1331. else
  1332. return other->tcs[other_tc].HI[sel];
  1333. }
  1334. target_ulong helper_mftacx(CPUMIPSState *env, uint32_t sel)
  1335. {
  1336. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1337. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1338. if (other_tc == other->current_tc)
  1339. return other->active_tc.ACX[sel];
  1340. else
  1341. return other->tcs[other_tc].ACX[sel];
  1342. }
  1343. target_ulong helper_mftdsp(CPUMIPSState *env)
  1344. {
  1345. int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
  1346. CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);
  1347. if (other_tc == other->current_tc)
  1348. return other->active_tc.DSPControl;
  1349. else
  1350. return other->tcs[other_tc].DSPControl;
  1351. }
/* MTTR helpers: write arg1 into a register of the TC selected by
   VPEControl.TargTC.  mips_cpu_map_tc() maps the VPE-relative TC index to
   the owning CPU; if the target TC is that CPU's running TC we write the
   live (active_tc) context, otherwise the saved per-TC context. */
void helper_mttgpr(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    /* TargTC occupies the low bits of VPEControl, so no shift is needed. */
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.gpr[sel] = arg1;
    else
        other->tcs[other_tc].gpr[sel] = arg1;
}

/* Write LO[sel] of the targeted TC. */
void helper_mttlo(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.LO[sel] = arg1;
    else
        other->tcs[other_tc].LO[sel] = arg1;
}

/* Write HI[sel] of the targeted TC. */
void helper_mtthi(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.HI[sel] = arg1;
    else
        other->tcs[other_tc].HI[sel] = arg1;
}

/* Write ACX[sel] (DSP accumulator extension) of the targeted TC. */
void helper_mttacx(CPUMIPSState *env, target_ulong arg1, uint32_t sel)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.ACX[sel] = arg1;
    else
        other->tcs[other_tc].ACX[sel] = arg1;
}

/* Write DSPControl of the targeted TC. */
void helper_mttdsp(CPUMIPSState *env, target_ulong arg1)
{
    int other_tc = env->CP0_VPEControl & (0xff << CP0VPECo_TargTC);
    CPUMIPSState *other = mips_cpu_map_tc(env, &other_tc);

    if (other_tc == other->current_tc)
        other->active_tc.DSPControl = arg1;
    else
        other->tcs[other_tc].DSPControl = arg1;
}
/* MIPS MT functions */
target_ulong helper_dmt(void)
{
    /* DMT (disable multi-threading) is not implemented yet; the
       architectural return value would be the previous VPEControl.TE. */
    // TODO
    return 0;
}

target_ulong helper_emt(void)
{
    /* EMT (enable multi-threading) is not implemented yet. */
    // TODO
    return 0;
}
/* DVPE: disable multi-VPE operation.  Clears MVPControl.EVP and puts
   every VPE except the one executing the instruction to sleep.
   Returns the previous MVPControl value (for the rt destination). */
target_ulong helper_dvpe(CPUMIPSState *env)
{
    CPUMIPSState *other_cpu_env = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        /* Turn off all VPEs except the one executing the dvpe. */
        if (other_cpu_env != env) {
            MIPSCPU *other_cpu = mips_env_get_cpu(other_cpu_env);

            other_cpu_env->mvp->CP0_MVPControl &= ~(1 << CP0MVPCo_EVP);
            mips_vpe_sleep(other_cpu);
        }
        other_cpu_env = other_cpu_env->next_cpu;
    } while (other_cpu_env);
    return prev;
}
/* EVPE: enable multi-VPE operation.  Sets MVPControl.EVP and wakes every
   other VPE that is not deliberately sleeping in WFI.
   Returns the previous MVPControl value. */
target_ulong helper_evpe(CPUMIPSState *env)
{
    CPUMIPSState *other_cpu_env = first_cpu;
    target_ulong prev = env->mvp->CP0_MVPControl;

    do {
        MIPSCPU *other_cpu = mips_env_get_cpu(other_cpu_env);

        if (other_cpu_env != env
            /* If the VPE is WFI, don't disturb its sleep. */
            && !mips_vpe_is_wfi(other_cpu)) {
            /* Enable the VPE. */
            other_cpu_env->mvp->CP0_MVPControl |= (1 << CP0MVPCo_EVP);
            mips_vpe_wake(other_cpu_env); /* And wake it up. */
        }
        other_cpu_env = other_cpu_env->next_cpu;
    } while (other_cpu_env);
    return prev;
}
  1440. #endif /* !CONFIG_USER_ONLY */
/* FORK: allocate a new TC and seed its registers from rs/rt.
   Stub — TC allocation is not implemented, so this is a no-op. */
void helper_fork(target_ulong arg1, target_ulong arg2)
{
    // arg1 = rt, arg2 = rs
    arg1 = 0; /* dead store; placeholder until the TC write below exists */
    // TODO: store to TC register
}
/* YIELD: arg < 0 (other than -2) may raise a Thread exception with
   EXCPT code 4 (YSI) when yield scheduling is intercepted; arg == 0
   would deallocate the TC (unimplemented); arg > 0 raises a Thread
   exception with EXCPT code 2 since qualifier inputs are not modelled.
   Returns CP0_YQMask as the architectural result. */
target_ulong helper_yield(CPUMIPSState *env, target_ulong arg)
{
    target_long arg1 = arg;

    if (arg1 < 0) {
        /* No scheduling policy implemented. */
        if (arg1 != -2) {
            if (env->CP0_VPEControl & (1 << CP0VPECo_YSI) &&
                env->active_tc.CP0_TCStatus & (1 << CP0TCSt_DT)) {
                /* Replace the EXCPT field with code 4 (YSI) and trap. */
                env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
                env->CP0_VPEControl |= 4 << CP0VPECo_EXCPT;
                helper_raise_exception(env, EXCP_THREAD);
            }
        }
    } else if (arg1 == 0) {
        if (0 /* TODO: TC underflow */) {
            env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
            helper_raise_exception(env, EXCP_THREAD);
        } else {
            // TODO: Deallocate TC
        }
    } else if (arg1 > 0) {
        /* Yield qualifier inputs not implemented. */
        env->CP0_VPEControl &= ~(0x7 << CP0VPECo_EXCPT);
        env->CP0_VPEControl |= 2 << CP0VPECo_EXCPT;
        helper_raise_exception(env, EXCP_THREAD);
    }
    return env->CP0_YQMask;
}
  1475. #ifndef CONFIG_USER_ONLY
  1476. /* TLB management */
/* Flush qemu's software TLB and forget all shadow entries (entries kept
   beyond nb_tlb to model overwritten-but-still-cached translations). */
static void cpu_mips_tlb_flush (CPUMIPSState *env, int flush_global)
{
    /* Flush qemu's TLB and discard all shadowed entries. */
    tlb_flush (env, flush_global);
    env->tlb->tlb_in_use = env->tlb->nb_tlb;
}

/* Invalidate (and drop) every shadow TLB entry at index >= first. */
static void r4k_mips_tlb_flush_extra (CPUMIPSState *env, int first)
{
    /* Discard entries from env->tlb[first] onwards. */
    while (env->tlb->tlb_in_use > first) {
        r4k_invalidate_tlb(env, --env->tlb->tlb_in_use, 0);
    }
}
/* Fill R4K TLB entry idx from the CP0 EntryHi/PageMask/EntryLo0/1
   registers, decoding the architectural field layout. */
static void r4k_fill_tlb(CPUMIPSState *env, int idx)
{
    r4k_tlb_t *tlb;

    /* XXX: detect conflicting TLBs and raise a MCHECK exception when needed */
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    /* VPN2: EntryHi with the double-page (even/odd) bits masked off. */
    tlb->VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    tlb->VPN &= env->SEGMask;
#endif
    tlb->ASID = env->CP0_EntryHi & 0xFF;
    tlb->PageMask = env->CP0_PageMask;
    /* Entry is global only if both EntryLo G bits are set. */
    tlb->G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    /* EntryLo layout: bit0 G, bit1 V, bit2 D, bits3-5 C, PFN from bit 6. */
    tlb->V0 = (env->CP0_EntryLo0 & 2) != 0;
    tlb->D0 = (env->CP0_EntryLo0 & 4) != 0;
    tlb->C0 = (env->CP0_EntryLo0 >> 3) & 0x7;
    tlb->PFN[0] = (env->CP0_EntryLo0 >> 6) << 12;
    tlb->V1 = (env->CP0_EntryLo1 & 2) != 0;
    tlb->D1 = (env->CP0_EntryLo1 & 4) != 0;
    tlb->C1 = (env->CP0_EntryLo1 >> 3) & 0x7;
    tlb->PFN[1] = (env->CP0_EntryLo1 >> 6) << 12;
}
/* TLBWI: write the TLB entry selected by CP0_Index from the CP0
   registers.  Shadow entries are only flushed when the write changes
   more than access permissions, to keep the fast path cheap. */
void r4k_helper_tlbwi(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    int idx;
    target_ulong VPN;
    uint8_t ASID;
    bool G, V0, D0, V1, D1;

    /* Mask off the P (probe-failure) bit and wrap into the TLB size. */
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];
    VPN = env->CP0_EntryHi & (TARGET_PAGE_MASK << 1);
#if defined(TARGET_MIPS64)
    VPN &= env->SEGMask;
#endif
    ASID = env->CP0_EntryHi & 0xff;
    G = env->CP0_EntryLo0 & env->CP0_EntryLo1 & 1;
    V0 = (env->CP0_EntryLo0 & 2) != 0;
    D0 = (env->CP0_EntryLo0 & 4) != 0;
    V1 = (env->CP0_EntryLo1 & 2) != 0;
    D1 = (env->CP0_EntryLo1 & 4) != 0;

    /* Discard cached TLB entries, unless tlbwi is just upgrading access
       permissions on the current entry. */
    if (tlb->VPN != VPN || tlb->ASID != ASID || tlb->G != G ||
        (tlb->V0 && !V0) || (tlb->D0 && !D0) ||
        (tlb->V1 && !V1) || (tlb->D1 && !D1)) {
        r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);
    }

    r4k_invalidate_tlb(env, idx, 0);
    r4k_fill_tlb(env, idx);
}
/* TLBWR: write the CP0 registers into a (pseudo-)random TLB entry,
   keeping the overwritten entry as a shadow (use_extra = 1). */
void r4k_helper_tlbwr(CPUMIPSState *env)
{
    int r = cpu_mips_get_random(env);

    r4k_invalidate_tlb(env, r, 1);
    r4k_fill_tlb(env, r);
}
/* TLBP: probe the TLB for an entry matching CP0_EntryHi.  On a hit,
   CP0_Index is set to the matching slot; on a miss, the P bit (bit 31)
   of CP0_Index is set and any matching shadow entries are discarded so
   stale translations cannot survive the failed probe. */
void r4k_helper_tlbp(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    target_ulong mask;
    target_ulong tag;
    target_ulong VPN;
    uint8_t ASID;
    int i;

    ASID = env->CP0_EntryHi & 0xFF;
    for (i = 0; i < env->tlb->nb_tlb; i++) {
        tlb = &env->tlb->mmu.r4k.tlb[i];
        /* 1k pages are not supported. */
        mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
        tag = env->CP0_EntryHi & ~mask;
        VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
        tag &= env->SEGMask;
#endif
        /* Check ASID, virtual page number & size */
        if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
            /* TLB match */
            env->CP0_Index = i;
            break;
        }
    }
    if (i == env->tlb->nb_tlb) {
        /* No match.  Discard any shadow entries, if any of them match. */
        for (i = env->tlb->nb_tlb; i < env->tlb->tlb_in_use; i++) {
            tlb = &env->tlb->mmu.r4k.tlb[i];
            /* 1k pages are not supported. */
            mask = tlb->PageMask | ~(TARGET_PAGE_MASK << 1);
            tag = env->CP0_EntryHi & ~mask;
            VPN = tlb->VPN & ~mask;
#if defined(TARGET_MIPS64)
            tag &= env->SEGMask;
#endif
            /* Check ASID, virtual page number & size */
            if ((tlb->G == 1 || tlb->ASID == ASID) && VPN == tag) {
                r4k_mips_tlb_flush_extra (env, i);
                break;
            }
        }

        env->CP0_Index |= 0x80000000;
    }
}
/* TLBR: read the TLB entry selected by CP0_Index back into the CP0
   EntryHi/PageMask/EntryLo0/1 registers (inverse of r4k_fill_tlb). */
void r4k_helper_tlbr(CPUMIPSState *env)
{
    r4k_tlb_t *tlb;
    uint8_t ASID;
    int idx;

    ASID = env->CP0_EntryHi & 0xFF;
    idx = (env->CP0_Index & ~0x80000000) % env->tlb->nb_tlb;
    tlb = &env->tlb->mmu.r4k.tlb[idx];

    /* If this will change the current ASID, flush qemu's TLB.  */
    if (ASID != tlb->ASID)
        cpu_mips_tlb_flush (env, 1);

    r4k_mips_tlb_flush_extra(env, env->tlb->nb_tlb);

    /* Re-encode the architectural register layout from the entry. */
    env->CP0_EntryHi = tlb->VPN | tlb->ASID;
    env->CP0_PageMask = tlb->PageMask;
    env->CP0_EntryLo0 = tlb->G | (tlb->V0 << 1) | (tlb->D0 << 2) |
                        (tlb->C0 << 3) | (tlb->PFN[0] >> 6);
    env->CP0_EntryLo1 = tlb->G | (tlb->V1 << 1) | (tlb->D1 << 2) |
                        (tlb->C1 << 3) | (tlb->PFN[1] >> 6);
}
/* TLB instruction entry points: dispatch through the per-MMU-model
   function table so different TLB models can plug in. */
void helper_tlbwi(CPUMIPSState *env)
{
    env->tlb->helper_tlbwi(env);
}

void helper_tlbwr(CPUMIPSState *env)
{
    env->tlb->helper_tlbwr(env);
}

void helper_tlbp(CPUMIPSState *env)
{
    env->tlb->helper_tlbp(env);
}

void helper_tlbr(CPUMIPSState *env)
{
    env->tlb->helper_tlbr(env);
}
  1626. /* Specials */
  1627. target_ulong helper_di(CPUMIPSState *env)
  1628. {
  1629. target_ulong t0 = env->CP0_Status;
  1630. env->CP0_Status = t0 & ~(1 << CP0St_IE);
  1631. return t0;
  1632. }
  1633. target_ulong helper_ei(CPUMIPSState *env)
  1634. {
  1635. target_ulong t0 = env->CP0_Status;
  1636. env->CP0_Status = t0 | (1 << CP0St_IE);
  1637. return t0;
  1638. }
/* Trace state before an ERET/DERET when CPU_LOG_EXEC logging is on. */
static void debug_pre_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("ERET: PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                 env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        qemu_log("\n");
    }
}

/* Trace state after an ERET/DERET, including the resulting MMU mode. */
static void debug_post_eret(CPUMIPSState *env)
{
    if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
        qemu_log("  =>  PC " TARGET_FMT_lx " EPC " TARGET_FMT_lx,
                 env->active_tc.PC, env->CP0_EPC);
        if (env->CP0_Status & (1 << CP0St_ERL))
            qemu_log(" ErrorEPC " TARGET_FMT_lx, env->CP0_ErrorEPC);
        if (env->hflags & MIPS_HFLAG_DM)
            qemu_log(" DEPC " TARGET_FMT_lx, env->CP0_DEPC);
        switch (env->hflags & MIPS_HFLAG_KSU) {
        case MIPS_HFLAG_UM: qemu_log(", UM\n"); break;
        case MIPS_HFLAG_SM: qemu_log(", SM\n"); break;
        case MIPS_HFLAG_KM: qemu_log("\n"); break;
        default: cpu_abort(env, "Invalid MMU mode!\n"); break;
        }
    }
}
  1668. static void set_pc(CPUMIPSState *env, target_ulong error_pc)
  1669. {
  1670. env->active_tc.PC = error_pc & ~(target_ulong)1;
  1671. if (error_pc & 1) {
  1672. env->hflags |= MIPS_HFLAG_M16;
  1673. } else {
  1674. env->hflags &= ~(MIPS_HFLAG_M16);
  1675. }
  1676. }
/* ERET: return from exception.  If Status.ERL is set, resume at
   ErrorEPC and clear ERL; otherwise resume at EPC and clear EXL.
   Clearing lladdr breaks any pending LL/SC link, per the architecture. */
void helper_eret(CPUMIPSState *env)
{
    debug_pre_eret(env);
    if (env->CP0_Status & (1 << CP0St_ERL)) {
        set_pc(env, env->CP0_ErrorEPC);
        env->CP0_Status &= ~(1 << CP0St_ERL);
    } else {
        set_pc(env, env->CP0_EPC);
        env->CP0_Status &= ~(1 << CP0St_EXL);
    }
    compute_hflags(env);
    debug_post_eret(env);
    env->lladdr = 1;
}
  1691. void helper_deret(CPUMIPSState *env)
  1692. {
  1693. debug_pre_eret(env);
  1694. set_pc(env, env->CP0_DEPC);
  1695. env->hflags &= MIPS_HFLAG_DM;
  1696. compute_hflags(env);
  1697. debug_post_eret(env);
  1698. env->lladdr = 1;
  1699. }
  1700. #endif /* !CONFIG_USER_ONLY */
/* RDHWR helpers: each hardware register is readable in user mode only
   when kernel code has set the matching HWREna bit (or when we already
   have CP0 privilege); otherwise raise Reserved Instruction. */
target_ulong helper_rdhwr_cpunum(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 0)))
        return env->CP0_EBase & 0x3ff;   /* CPUNum field of EBase */
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_synci_step(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 1)))
        return env->SYNCI_Step;          /* SYNCI cache-line step */
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_cc(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 2)))
        return env->CP0_Count;           /* cycle counter */
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}

target_ulong helper_rdhwr_ccres(CPUMIPSState *env)
{
    if ((env->hflags & MIPS_HFLAG_CP0) ||
        (env->CP0_HWREna & (1 << 3)))
        return env->CCRes;               /* counter resolution */
    else
        helper_raise_exception(env, EXCP_RI);

    return 0;
}
/* Minimal PMON monitor-call emulation used by some bare-metal guests.
   Calling convention: gpr[4] is the first argument, gpr[2] the result. */
void helper_pmon(CPUMIPSState *env, int function)
{
    function /= 2;
    switch (function) {
    case 2: /* TODO: char inbyte(int waitflag); */
        if (env->active_tc.gpr[4] == 0)
            env->active_tc.gpr[2] = -1;
        /* Fall through */
        /* NOTE(review): because of this fallthrough gpr[2] is set to -1
           unconditionally below; the guard above is currently redundant
           until real input support is added. */
    case 11: /* TODO: char inbyte (void); */
        env->active_tc.gpr[2] = -1;
        break;
    case 3:
    case 12:
        printf("%c", (char)(env->active_tc.gpr[4] & 0xFF));
        break;
    case 17:
        break;
    case 158:
        {
            /* NOTE(review): treats the guest register value as a host
               pointer — only valid for user-mode emulation where guest
               addresses are directly mapped; verify against callers. */
            unsigned char *fmt = (void *)(uintptr_t)env->active_tc.gpr[4];
            printf("%s", fmt);
        }
        break;
    }
}
/* WAIT: halt the CPU until the next interrupt, leaving the main loop
   via an EXCP_HLT exception. */
void helper_wait(CPUMIPSState *env)
{
    env->halted = 1;
    cpu_reset_interrupt(env, CPU_INTERRUPT_WAKE);
    helper_raise_exception(env, EXCP_HLT);
}
  1768. #if !defined(CONFIG_USER_ONLY)
  1769. static void QEMU_NORETURN do_unaligned_access(CPUMIPSState *env,
  1770. target_ulong addr, int is_write,
  1771. int is_user, uintptr_t retaddr);
  1772. #define MMUSUFFIX _mmu
  1773. #define ALIGNED_ONLY
  1774. #define SHIFT 0
  1775. #include "exec/softmmu_template.h"
  1776. #define SHIFT 1
  1777. #include "exec/softmmu_template.h"
  1778. #define SHIFT 2
  1779. #include "exec/softmmu_template.h"
  1780. #define SHIFT 3
  1781. #include "exec/softmmu_template.h"
/* Called by the softmmu templates on an unaligned access: record the
   faulting address and raise Address Error (store or load/fetch). */
static void do_unaligned_access(CPUMIPSState *env, target_ulong addr,
                                int is_write, int is_user, uintptr_t retaddr)
{
    env->CP0_BadVAddr = addr;
    do_raise_exception(env, (is_write == 1) ? EXCP_AdES : EXCP_AdEL, retaddr);
}
/* Softmmu slow-path TLB refill.  On failure, re-raise the exception set
   up by cpu_mips_handle_mmu_fault, unwinding to the guest retaddr. */
void tlb_fill(CPUMIPSState *env, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = cpu_mips_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        do_raise_exception_err(env, env->exception_index,
                               env->error_code, retaddr);
    }
}
/* Access to an unassigned physical address: raise Instruction Bus Error
   for fetches, Data Bus Error otherwise. */
void cpu_unassigned_access(CPUMIPSState *env, hwaddr addr,
                           int is_write, int is_exec, int unused, int size)
{
    if (is_exec)
        helper_raise_exception(env, EXCP_IBE);
    else
        helper_raise_exception(env, EXCP_DBE);
}
  1806. #endif /* !CONFIG_USER_ONLY */
  1807. /* Complex FPU operations which may need stack space. */
  1808. #define FLOAT_TWO32 make_float32(1 << 30)
  1809. #define FLOAT_TWO64 make_float64(1ULL << 62)
  1810. #define FP_TO_INT32_OVERFLOW 0x7fffffff
  1811. #define FP_TO_INT64_OVERFLOW 0x7fffffffffffffffULL
/* convert MIPS rounding mode in FCR31 to IEEE library */
static unsigned int ieee_rm[] = {
    float_round_nearest_even,   /* RN */
    float_round_to_zero,        /* RZ */
    float_round_up,             /* RP */
    float_round_down            /* RM */
};

/* Re-apply the rounding mode from FCR31 bits 1:0 to the softfloat status
   (used after helpers that temporarily force a different mode). */
static inline void restore_rounding_mode(CPUMIPSState *env)
{
    set_float_rounding_mode(ieee_rm[env->active_fpu.fcr31 & 3],
                            &env->active_fpu.fp_status);
}

/* Re-apply the flush-to-zero setting from FCR31 bit 24 (FS). */
static inline void restore_flush_mode(CPUMIPSState *env)
{
    set_flush_to_zero((env->active_fpu.fcr31 & (1 << 24)) != 0,
                      &env->active_fpu.fp_status);
}
/* CFC1: read an FPU control register.  Registers 25/26/28 are views of
   FCR31 (condition codes, cause/flags, enables+FS); anything else reads
   the full FCSR. */
target_ulong helper_cfc1(CPUMIPSState *env, uint32_t reg)
{
    target_ulong arg1;

    switch (reg) {
    case 0:
        arg1 = (int32_t)env->active_fpu.fcr0;   /* FIR: implementation */
        break;
    case 25:
        /* FCCR: condition codes CC7..CC1 plus CC0. */
        arg1 = ((env->active_fpu.fcr31 >> 24) & 0xfe) | ((env->active_fpu.fcr31 >> 23) & 0x1);
        break;
    case 26:
        /* FEXR: cause and flag fields. */
        arg1 = env->active_fpu.fcr31 & 0x0003f07c;
        break;
    case 28:
        /* FENR: enables, rounding mode, and FS relocated to bit 2. */
        arg1 = (env->active_fpu.fcr31 & 0x00000f83) | ((env->active_fpu.fcr31 >> 22) & 0x4);
        break;
    default:
        arg1 = (int32_t)env->active_fpu.fcr31;  /* FCSR */
        break;
    }

    return arg1;
}
/* CTC1: write an FPU control register.  Registers 25/26/28 update the
   corresponding slices of FCR31; writes with reserved bits set are
   silently ignored.  After a valid write, rounding/flush modes are
   re-derived and a pending enabled cause raises FPE. */
void helper_ctc1(CPUMIPSState *env, target_ulong arg1, uint32_t reg)
{
    switch(reg) {
    case 25:
        /* FCCR: scatter CC7..CC1 and CC0 back into FCR31. */
        if (arg1 & 0xffffff00)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0x017fffff) | ((arg1 & 0xfe) << 24) |
                     ((arg1 & 0x1) << 23);
        break;
    case 26:
        /* FEXR: update cause/flag fields only. */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfffc0f83) | (arg1 & 0x0003f07c);
        break;
    case 28:
        /* FENR: enables, rounding mode, and FS (bit 2 -> bit 24). */
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = (env->active_fpu.fcr31 & 0xfefff07c) | (arg1 & 0x00000f83) |
                     ((arg1 & 0x4) << 22);
        break;
    case 31:
        if (arg1 & 0x007c0000)
            return;
        env->active_fpu.fcr31 = arg1;
        break;
    default:
        return;
    }
    /* set rounding mode */
    restore_rounding_mode(env);
    /* set flush-to-zero mode */
    restore_flush_mode(env);
    set_float_exception_flags(0, &env->active_fpu.fp_status);
    /* Unimplemented-op (bit 5, always "enabled") or any enabled cause
       bit that is pending raises the FP exception immediately. */
    if ((GET_FP_ENABLE(env->active_fpu.fcr31) | 0x20) & GET_FP_CAUSE(env->active_fpu.fcr31))
        do_raise_exception(env, EXCP_FPE, GETPC());
}
  1887. static inline int ieee_ex_to_mips(int xcpt)
  1888. {
  1889. int ret = 0;
  1890. if (xcpt) {
  1891. if (xcpt & float_flag_invalid) {
  1892. ret |= FP_INVALID;
  1893. }
  1894. if (xcpt & float_flag_overflow) {
  1895. ret |= FP_OVERFLOW;
  1896. }
  1897. if (xcpt & float_flag_underflow) {
  1898. ret |= FP_UNDERFLOW;
  1899. }
  1900. if (xcpt & float_flag_divbyzero) {
  1901. ret |= FP_DIV0;
  1902. }
  1903. if (xcpt & float_flag_inexact) {
  1904. ret |= FP_INEXACT;
  1905. }
  1906. }
  1907. return ret;
  1908. }
/* Fold the accumulated softfloat exception flags into FCR31: always set
   the cause field; if any raised cause is enabled, clear the softfloat
   flags and raise FPE at the guest pc, otherwise latch them into the
   sticky flag field.  Note the softfloat flags are cleared whenever any
   cause bit was raised. */
static inline void update_fcr31(CPUMIPSState *env, uintptr_t pc)
{
    int tmp = ieee_ex_to_mips(get_float_exception_flags(&env->active_fpu.fp_status));

    SET_FP_CAUSE(env->active_fpu.fcr31, tmp);

    if (tmp) {
        set_float_exception_flags(0, &env->active_fpu.fp_status);

        if (GET_FP_ENABLE(env->active_fpu.fcr31) & tmp) {
            do_raise_exception(env, EXCP_FPE, pc);
        } else {
            UPDATE_FP_FLAGS(env->active_fpu.fcr31, tmp);
        }
    }
}
/* Float support.
   Single precition routines have a "s" suffix, double precision a
   "d" suffix, 32bit integer "w", 64bit integer "l", paired single "ps",
   paired single lower "pl", paired single upper "pu".  */

/* unary operations, modifying fp status  */
/* SQRT.D: double-precision square root. */
uint64_t helper_float_sqrt_d(CPUMIPSState *env, uint64_t fdt0)
{
    fdt0 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt0;
}

/* SQRT.S: single-precision square root. */
uint32_t helper_float_sqrt_s(CPUMIPSState *env, uint32_t fst0)
{
    fst0 = float32_sqrt(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst0;
}
/* CVT.D.S: single -> double. */
uint64_t helper_float_cvtd_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t fdt2;

    fdt2 = float32_to_float64(fst0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

/* CVT.D.W: signed 32-bit integer -> double. */
uint64_t helper_float_cvtd_w(CPUMIPSState *env, uint32_t wt0)
{
    uint64_t fdt2;

    fdt2 = int32_to_float64(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}

/* CVT.D.L: signed 64-bit integer -> double. */
uint64_t helper_float_cvtd_l(CPUMIPSState *env, uint64_t dt0)
{
    uint64_t fdt2;

    fdt2 = int64_to_float64(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fdt2;
}
/* CVT.L.D: double -> signed 64-bit integer, saturating to the MIPS
   overflow value on invalid/overflow (checked before update_fcr31
   clears the softfloat flags). */
uint64_t helper_float_cvtl_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

/* CVT.L.S: single -> signed 64-bit integer, with the same saturation. */
uint64_t helper_float_cvtl_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}
/* CVT.PS.PW: paired 32-bit integers -> paired singles (low word in the
   low half, high word in the high half). */
uint64_t helper_float_cvtps_pw(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;
    uint32_t fsth2;

    fst2 = int32_to_float32(dt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    fsth2 = int32_to_float32(dt0 >> 32, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return ((uint64_t)fsth2 << 32) | fst2;
}

/* CVT.PW.PS: paired singles -> paired 32-bit integers.  Each half is
   converted with its own flag set so each can saturate independently;
   the union of both flag sets is then reported through update_fcr31. */
uint64_t helper_float_cvtpw_ps(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;
    uint32_t wth2;
    int excp, excph;

    wt2 = float32_to_int32(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
    excp = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excp & (float_flag_overflow | float_flag_invalid)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }

    set_float_exception_flags(0, &env->active_fpu.fp_status);
    wth2 = float32_to_int32(fdt0 >> 32, &env->active_fpu.fp_status);
    excph = get_float_exception_flags(&env->active_fpu.fp_status);
    if (excph & (float_flag_overflow | float_flag_invalid)) {
        wth2 = FP_TO_INT32_OVERFLOW;
    }

    set_float_exception_flags(excp | excph, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());

    return ((uint64_t)wth2 << 32) | wt2;
}
/* CVT.S.D: double -> single. */
uint32_t helper_float_cvts_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t fst2;

    fst2 = float64_to_float32(fdt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.W: signed 32-bit integer -> single. */
uint32_t helper_float_cvts_w(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t fst2;

    fst2 = int32_to_float32(wt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.L: signed 64-bit integer -> single. */
uint32_t helper_float_cvts_l(CPUMIPSState *env, uint64_t dt0)
{
    uint32_t fst2;

    fst2 = int64_to_float32(dt0, &env->active_fpu.fp_status);
    update_fcr31(env, GETPC());
    return fst2;
}

/* CVT.S.PL: lower paired single -> single (bit-copy, no rounding). */
uint32_t helper_float_cvts_pl(CPUMIPSState *env, uint32_t wt0)
{
    uint32_t wt2;

    wt2 = wt0;
    update_fcr31(env, GETPC());
    return wt2;
}

/* CVT.S.PU: upper paired single -> single (bit-copy, no rounding). */
uint32_t helper_float_cvts_pu(CPUMIPSState *env, uint32_t wth0)
{
    uint32_t wt2;

    wt2 = wth0;
    update_fcr31(env, GETPC());
    return wt2;
}
  2046. uint32_t helper_float_cvtw_s(CPUMIPSState *env, uint32_t fst0)
  2047. {
  2048. uint32_t wt2;
  2049. wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
  2050. update_fcr31(env, GETPC());
  2051. if (get_float_exception_flags(&env->active_fpu.fp_status)
  2052. & (float_flag_invalid | float_flag_overflow)) {
  2053. wt2 = FP_TO_INT32_OVERFLOW;
  2054. }
  2055. return wt2;
  2056. }
/* CVT.W.D: double -> signed 32-bit integer, saturating to the MIPS
   overflow value on invalid/overflow (flags checked before update_fcr31
   clears them). */
uint32_t helper_float_cvtw_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
/* ROUND.L/W.fmt: convert to integer with round-to-nearest-even forced,
   then restore the guest's FCR31 rounding mode.  Invalid/overflow
   saturates to the MIPS overflow value. */
uint64_t helper_float_roundl_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_roundl_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_roundw_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_roundw_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_nearest_even, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
/* TRUNC.L/W.fmt: convert to integer with round-toward-zero (dedicated
   softfloat entry points, so no rounding-mode save/restore is needed).
   Invalid/overflow saturates to the MIPS overflow value. */
uint64_t helper_float_truncl_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    dt2 = float64_to_int64_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_truncl_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    dt2 = float32_to_int64_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_truncw_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    wt2 = float64_to_int32_round_to_zero(fdt0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_truncw_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    wt2 = float32_to_int32_round_to_zero(fst0, &env->active_fpu.fp_status);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
/* CEIL.L/W.fmt: convert to integer with round-up forced, then restore
   the guest's rounding mode.  Invalid/overflow saturates. */
uint64_t helper_float_ceill_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_ceill_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_ceilw_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_ceilw_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_up, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
/* FLOOR.L/W.fmt: convert to integer with round-down forced, then
   restore the guest's rounding mode.  Invalid/overflow saturates. */
uint64_t helper_float_floorl_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float64_to_int64(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint64_t helper_float_floorl_s(CPUMIPSState *env, uint32_t fst0)
{
    uint64_t dt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    dt2 = float32_to_int64(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        dt2 = FP_TO_INT64_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return dt2;
}

uint32_t helper_float_floorw_d(CPUMIPSState *env, uint64_t fdt0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float64_to_int32(fdt0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}

uint32_t helper_float_floorw_s(CPUMIPSState *env, uint32_t fst0)
{
    uint32_t wt2;

    set_float_rounding_mode(float_round_down, &env->active_fpu.fp_status);
    wt2 = float32_to_int32(fst0, &env->active_fpu.fp_status);
    restore_rounding_mode(env);
    if (get_float_exception_flags(&env->active_fpu.fp_status)
        & (float_flag_invalid | float_flag_overflow)) {
        wt2 = FP_TO_INT32_OVERFLOW;
    }
    update_fcr31(env, GETPC());
    return wt2;
}
/* unary operations, not modifying fp status  */
/* Generates the _d, _s and paired-single _ps variants of a sign-only
   unary op (abs, chs/negate).  These flip/clear the sign bit without
   touching the softfloat status, so no update_fcr31 is needed. */
#define FLOAT_UNOP(name)                                   \
uint64_t helper_float_ ## name ## _d(uint64_t fdt0)        \
{                                                          \
    return float64_ ## name(fdt0);                         \
}                                                          \
uint32_t helper_float_ ## name ## _s(uint32_t fst0)        \
{                                                          \
    return float32_ ## name(fst0);                         \
}                                                          \
uint64_t helper_float_ ## name ## _ps(uint64_t fdt0)       \
{                                                          \
    uint32_t wt0;                                          \
    uint32_t wth0;                                         \
                                                           \
    wt0 = float32_ ## name(fdt0 & 0XFFFFFFFF);             \
    wth0 = float32_ ## name(fdt0 >> 32);                   \
    return ((uint64_t)wth0 << 32) | wt0;                   \
}
FLOAT_UNOP(abs)
FLOAT_UNOP(chs)
#undef FLOAT_UNOP
  2290. /* MIPS specific unary operations */
  2291. uint64_t helper_float_recip_d(CPUMIPSState *env, uint64_t fdt0)
  2292. {
  2293. uint64_t fdt2;
  2294. fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
  2295. update_fcr31(env, GETPC());
  2296. return fdt2;
  2297. }
  2298. uint32_t helper_float_recip_s(CPUMIPSState *env, uint32_t fst0)
  2299. {
  2300. uint32_t fst2;
  2301. fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
  2302. update_fcr31(env, GETPC());
  2303. return fst2;
  2304. }
  2305. uint64_t helper_float_rsqrt_d(CPUMIPSState *env, uint64_t fdt0)
  2306. {
  2307. uint64_t fdt2;
  2308. fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
  2309. fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
  2310. update_fcr31(env, GETPC());
  2311. return fdt2;
  2312. }
  2313. uint32_t helper_float_rsqrt_s(CPUMIPSState *env, uint32_t fst0)
  2314. {
  2315. uint32_t fst2;
  2316. fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
  2317. fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
  2318. update_fcr31(env, GETPC());
  2319. return fst2;
  2320. }
  2321. uint64_t helper_float_recip1_d(CPUMIPSState *env, uint64_t fdt0)
  2322. {
  2323. uint64_t fdt2;
  2324. fdt2 = float64_div(float64_one, fdt0, &env->active_fpu.fp_status);
  2325. update_fcr31(env, GETPC());
  2326. return fdt2;
  2327. }
  2328. uint32_t helper_float_recip1_s(CPUMIPSState *env, uint32_t fst0)
  2329. {
  2330. uint32_t fst2;
  2331. fst2 = float32_div(float32_one, fst0, &env->active_fpu.fp_status);
  2332. update_fcr31(env, GETPC());
  2333. return fst2;
  2334. }
  2335. uint64_t helper_float_recip1_ps(CPUMIPSState *env, uint64_t fdt0)
  2336. {
  2337. uint32_t fst2;
  2338. uint32_t fsth2;
  2339. fst2 = float32_div(float32_one, fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
  2340. fsth2 = float32_div(float32_one, fdt0 >> 32, &env->active_fpu.fp_status);
  2341. update_fcr31(env, GETPC());
  2342. return ((uint64_t)fsth2 << 32) | fst2;
  2343. }
  2344. uint64_t helper_float_rsqrt1_d(CPUMIPSState *env, uint64_t fdt0)
  2345. {
  2346. uint64_t fdt2;
  2347. fdt2 = float64_sqrt(fdt0, &env->active_fpu.fp_status);
  2348. fdt2 = float64_div(float64_one, fdt2, &env->active_fpu.fp_status);
  2349. update_fcr31(env, GETPC());
  2350. return fdt2;
  2351. }
  2352. uint32_t helper_float_rsqrt1_s(CPUMIPSState *env, uint32_t fst0)
  2353. {
  2354. uint32_t fst2;
  2355. fst2 = float32_sqrt(fst0, &env->active_fpu.fp_status);
  2356. fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
  2357. update_fcr31(env, GETPC());
  2358. return fst2;
  2359. }
  2360. uint64_t helper_float_rsqrt1_ps(CPUMIPSState *env, uint64_t fdt0)
  2361. {
  2362. uint32_t fst2;
  2363. uint32_t fsth2;
  2364. fst2 = float32_sqrt(fdt0 & 0XFFFFFFFF, &env->active_fpu.fp_status);
  2365. fsth2 = float32_sqrt(fdt0 >> 32, &env->active_fpu.fp_status);
  2366. fst2 = float32_div(float32_one, fst2, &env->active_fpu.fp_status);
  2367. fsth2 = float32_div(float32_one, fsth2, &env->active_fpu.fp_status);
  2368. update_fcr31(env, GETPC());
  2369. return ((uint64_t)fsth2 << 32) | fst2;
  2370. }
/* Prototype-shaping macro; presumably used by other parts of this file
 * to declare helper_float_<name>_<p> entry points — no uses visible in
 * this region. */
#define FLOAT_OP(name, p) void helper_float_##name##_##p(CPUMIPSState *env)

/* binary operations */
/*
 * FLOAT_BINOP(name) instantiates the three arithmetic helpers for
 * softfloat operation <name> (add, sub, mul, div):
 *   helper_float_<name>_d  - float64 x float64
 *   helper_float_<name>_s  - float32 x float32
 *   helper_float_<name>_ps - paired single: each 32-bit half of the two
 *                            operands is combined independently (low
 *                            halves first), results packed high:low.
 * Every variant funnels accumulated IEEE flags through update_fcr31()
 * with the helper's own return address (GETPC()) before returning.
 */
#define FLOAT_BINOP(name)                                           \
uint64_t helper_float_ ## name ## _d(CPUMIPSState *env,             \
                                     uint64_t fdt0, uint64_t fdt1)  \
{                                                                   \
    uint64_t dt2;                                                   \
                                                                    \
    dt2 = float64_ ## name (fdt0, fdt1, &env->active_fpu.fp_status);\
    update_fcr31(env, GETPC());                                     \
    return dt2;                                                     \
}                                                                   \
                                                                    \
uint32_t helper_float_ ## name ## _s(CPUMIPSState *env,             \
                                     uint32_t fst0, uint32_t fst1)  \
{                                                                   \
    uint32_t wt2;                                                   \
                                                                    \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);\
    update_fcr31(env, GETPC());                                     \
    return wt2;                                                     \
}                                                                   \
                                                                    \
uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env,            \
                                      uint64_t fdt0,                \
                                      uint64_t fdt1)                \
{                                                                   \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                              \
    uint32_t fsth0 = fdt0 >> 32;                                    \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                              \
    uint32_t fsth1 = fdt1 >> 32;                                    \
    uint32_t wt2;                                                   \
    uint32_t wth2;                                                  \
                                                                    \
    wt2 = float32_ ## name (fst0, fst1, &env->active_fpu.fp_status);\
    wth2 = float32_ ## name (fsth0, fsth1, &env->active_fpu.fp_status); \
    update_fcr31(env, GETPC());                                     \
    return ((uint64_t)wth2 << 32) | wt2;                            \
}

FLOAT_BINOP(add)
FLOAT_BINOP(sub)
FLOAT_BINOP(mul)
FLOAT_BINOP(div)
#undef FLOAT_BINOP
/*
 * UNFUSED_FMA(prefix, a, b, c, flags): compute a = +/-(a * b +/- c)
 * in place, using separate softfloat multiply and add/sub calls — the
 * product is rounded before the addition, i.e. this is deliberately
 * NOT a fused multiply-add.  "flags" selects negation of the addend
 * (float_muladd_negate_c -> subtract c) and/or of the final result
 * (float_muladd_negate_result -> sign flip via chs, which touches no
 * flags).
 *
 * NOTE(review): "a" is both read and written, and every argument is
 * expanded multiple times; callers must pass plain lvalues/constants
 * (the FLOAT_FMA expansions below do).  Bare-brace block, not
 * do { } while (0) — used only as a full statement.
 */
#define UNFUSED_FMA(prefix, a, b, c, flags)                          \
{                                                                    \
    a = prefix##_mul(a, b, &env->active_fpu.fp_status);              \
    if ((flags) & float_muladd_negate_c) {                           \
        a = prefix##_sub(a, c, &env->active_fpu.fp_status);          \
    } else {                                                         \
        a = prefix##_add(a, c, &env->active_fpu.fp_status);          \
    }                                                                \
    if ((flags) & float_muladd_negate_result) {                      \
        a = prefix##_chs(a);                                         \
    }                                                                \
}
/* FMA based operations */
/*
 * FLOAT_FMA(name, type) instantiates the MADD/MSUB/NMADD/NMSUB helper
 * triplet (_d, _s, _ps).  "type" is a constant mask of
 * float_muladd_negate_* bits passed straight to UNFUSED_FMA, so the
 * result is computed with an intermediate rounding after the multiply
 * (unfused).  The _ps form applies the operation to the low and high
 * 32-bit halves independently and re-packs them high:low.  All
 * variants sync flags via update_fcr31(GETPC()) before returning.
 */
#define FLOAT_FMA(name, type)                                        \
uint64_t helper_float_ ## name ## _d(CPUMIPSState *env,              \
                                     uint64_t fdt0, uint64_t fdt1,   \
                                     uint64_t fdt2)                  \
{                                                                    \
    UNFUSED_FMA(float64, fdt0, fdt1, fdt2, type);                    \
    update_fcr31(env, GETPC());                                      \
    return fdt0;                                                     \
}                                                                    \
                                                                     \
uint32_t helper_float_ ## name ## _s(CPUMIPSState *env,              \
                                     uint32_t fst0, uint32_t fst1,   \
                                     uint32_t fst2)                  \
{                                                                    \
    UNFUSED_FMA(float32, fst0, fst1, fst2, type);                    \
    update_fcr31(env, GETPC());                                      \
    return fst0;                                                     \
}                                                                    \
                                                                     \
uint64_t helper_float_ ## name ## _ps(CPUMIPSState *env,             \
                                      uint64_t fdt0, uint64_t fdt1,  \
                                      uint64_t fdt2)                 \
{                                                                    \
    uint32_t fst0 = fdt0 & 0XFFFFFFFF;                               \
    uint32_t fsth0 = fdt0 >> 32;                                     \
    uint32_t fst1 = fdt1 & 0XFFFFFFFF;                               \
    uint32_t fsth1 = fdt1 >> 32;                                     \
    uint32_t fst2 = fdt2 & 0XFFFFFFFF;                               \
    uint32_t fsth2 = fdt2 >> 32;                                     \
                                                                     \
    UNFUSED_FMA(float32, fst0, fst1, fst2, type);                    \
    UNFUSED_FMA(float32, fsth0, fsth1, fsth2, type);                 \
    update_fcr31(env, GETPC());                                      \
    return ((uint64_t)fsth0 << 32) | fst0;                           \
}

FLOAT_FMA(madd, 0)
FLOAT_FMA(msub, float_muladd_negate_c)
FLOAT_FMA(nmadd, float_muladd_negate_result)
FLOAT_FMA(nmsub, float_muladd_negate_result | float_muladd_negate_c)
#undef FLOAT_FMA
  2468. /* MIPS specific binary operations */
  2469. uint64_t helper_float_recip2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
  2470. {
  2471. fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
  2472. fdt2 = float64_chs(float64_sub(fdt2, float64_one, &env->active_fpu.fp_status));
  2473. update_fcr31(env, GETPC());
  2474. return fdt2;
  2475. }
  2476. uint32_t helper_float_recip2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
  2477. {
  2478. fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
  2479. fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
  2480. update_fcr31(env, GETPC());
  2481. return fst2;
  2482. }
  2483. uint64_t helper_float_recip2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
  2484. {
  2485. uint32_t fst0 = fdt0 & 0XFFFFFFFF;
  2486. uint32_t fsth0 = fdt0 >> 32;
  2487. uint32_t fst2 = fdt2 & 0XFFFFFFFF;
  2488. uint32_t fsth2 = fdt2 >> 32;
  2489. fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
  2490. fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
  2491. fst2 = float32_chs(float32_sub(fst2, float32_one, &env->active_fpu.fp_status));
  2492. fsth2 = float32_chs(float32_sub(fsth2, float32_one, &env->active_fpu.fp_status));
  2493. update_fcr31(env, GETPC());
  2494. return ((uint64_t)fsth2 << 32) | fst2;
  2495. }
  2496. uint64_t helper_float_rsqrt2_d(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
  2497. {
  2498. fdt2 = float64_mul(fdt0, fdt2, &env->active_fpu.fp_status);
  2499. fdt2 = float64_sub(fdt2, float64_one, &env->active_fpu.fp_status);
  2500. fdt2 = float64_chs(float64_div(fdt2, FLOAT_TWO64, &env->active_fpu.fp_status));
  2501. update_fcr31(env, GETPC());
  2502. return fdt2;
  2503. }
  2504. uint32_t helper_float_rsqrt2_s(CPUMIPSState *env, uint32_t fst0, uint32_t fst2)
  2505. {
  2506. fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
  2507. fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
  2508. fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
  2509. update_fcr31(env, GETPC());
  2510. return fst2;
  2511. }
  2512. uint64_t helper_float_rsqrt2_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt2)
  2513. {
  2514. uint32_t fst0 = fdt0 & 0XFFFFFFFF;
  2515. uint32_t fsth0 = fdt0 >> 32;
  2516. uint32_t fst2 = fdt2 & 0XFFFFFFFF;
  2517. uint32_t fsth2 = fdt2 >> 32;
  2518. fst2 = float32_mul(fst0, fst2, &env->active_fpu.fp_status);
  2519. fsth2 = float32_mul(fsth0, fsth2, &env->active_fpu.fp_status);
  2520. fst2 = float32_sub(fst2, float32_one, &env->active_fpu.fp_status);
  2521. fsth2 = float32_sub(fsth2, float32_one, &env->active_fpu.fp_status);
  2522. fst2 = float32_chs(float32_div(fst2, FLOAT_TWO32, &env->active_fpu.fp_status));
  2523. fsth2 = float32_chs(float32_div(fsth2, FLOAT_TWO32, &env->active_fpu.fp_status));
  2524. update_fcr31(env, GETPC());
  2525. return ((uint64_t)fsth2 << 32) | fst2;
  2526. }
  2527. uint64_t helper_float_addr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
  2528. {
  2529. uint32_t fst0 = fdt0 & 0XFFFFFFFF;
  2530. uint32_t fsth0 = fdt0 >> 32;
  2531. uint32_t fst1 = fdt1 & 0XFFFFFFFF;
  2532. uint32_t fsth1 = fdt1 >> 32;
  2533. uint32_t fst2;
  2534. uint32_t fsth2;
  2535. fst2 = float32_add (fst0, fsth0, &env->active_fpu.fp_status);
  2536. fsth2 = float32_add (fst1, fsth1, &env->active_fpu.fp_status);
  2537. update_fcr31(env, GETPC());
  2538. return ((uint64_t)fsth2 << 32) | fst2;
  2539. }
  2540. uint64_t helper_float_mulr_ps(CPUMIPSState *env, uint64_t fdt0, uint64_t fdt1)
  2541. {
  2542. uint32_t fst0 = fdt0 & 0XFFFFFFFF;
  2543. uint32_t fsth0 = fdt0 >> 32;
  2544. uint32_t fst1 = fdt1 & 0XFFFFFFFF;
  2545. uint32_t fsth1 = fdt1 >> 32;
  2546. uint32_t fst2;
  2547. uint32_t fsth2;
  2548. fst2 = float32_mul (fst0, fsth0, &env->active_fpu.fp_status);
  2549. fsth2 = float32_mul (fst1, fsth1, &env->active_fpu.fp_status);
  2550. update_fcr31(env, GETPC());
  2551. return ((uint64_t)fsth2 << 32) | fst2;
  2552. }
/* compare operations */
/*
 * FOP_COND_D(op, cond) instantiates two double-precision compare
 * helpers for condition <op>:
 *   helper_cmp_d_<op>    - compares fdt0 with fdt1
 *   helper_cmpabs_d_<op> - same, on |fdt0| and |fdt1|
 * "cond" is substituted textually and may reference fdt0, fdt1 and
 * env.  It is evaluated before update_fcr31() so that any IEEE flags
 * it raises are accounted for; the boolean result then sets or clears
 * FPU condition-code bit "cc".  The *_quiet comparison variants below
 * do not signal on quiet NaNs; the non-quiet ones (sf..ngt group) do.
 */
#define FOP_COND_D(op, cond)                                   \
void helper_cmp_d_ ## op(CPUMIPSState *env, uint64_t fdt0,     \
                         uint64_t fdt1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_d_ ## op(CPUMIPSState *env, uint64_t fdt0,  \
                            uint64_t fdt1, int cc)             \
{                                                              \
    int c;                                                     \
    fdt0 = float64_abs(fdt0);                                  \
    fdt1 = float64_abs(fdt1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered_quiet() is still called. */
FOP_COND_D(f, (float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(un, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(eq, float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ueq, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(olt, float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ult, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ole, float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ule, float64_unordered_quiet(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le_quiet(fdt0, fdt1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float64_unordered() is still called. */
FOP_COND_D(sf, (float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status), 0))
FOP_COND_D(ngle,float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status))
FOP_COND_D(seq, float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngl, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_eq(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(lt, float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(nge, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_lt(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(le, float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
FOP_COND_D(ngt, float64_unordered(fdt1, fdt0, &env->active_fpu.fp_status) || float64_le(fdt0, fdt1, &env->active_fpu.fp_status))
/*
 * FOP_COND_S(op, cond): single-precision counterpart of FOP_COND_D —
 * instantiates helper_cmp_s_<op> and helper_cmpabs_s_<op> (the latter
 * compares absolute values).  "cond" is substituted textually, may
 * reference fst0, fst1 and env, and is evaluated before update_fcr31()
 * so its IEEE flags are accounted for; the result drives condition-code
 * bit "cc".
 */
#define FOP_COND_S(op, cond)                                   \
void helper_cmp_s_ ## op(CPUMIPSState *env, uint32_t fst0,     \
                         uint32_t fst1, int cc)                \
{                                                              \
    int c;                                                     \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}                                                              \
void helper_cmpabs_s_ ## op(CPUMIPSState *env, uint32_t fst0,  \
                            uint32_t fst1, int cc)             \
{                                                              \
    int c;                                                     \
    fst0 = float32_abs(fst0);                                  \
    fst1 = float32_abs(fst1);                                  \
    c = cond;                                                  \
    update_fcr31(env, GETPC());                                \
    if (c)                                                     \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_S(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_S(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0))
FOP_COND_S(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status))
FOP_COND_S(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(le, float32_le(fst0, fst1, &env->active_fpu.fp_status))
FOP_COND_S(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status))
/*
 * FOP_COND_PS(op, condl, condh): paired-single compare.  The 64-bit
 * operands are split into low (fst*/
/* bits 31..0) and high (fsth*, bits 63..32) singles; "condl" is
 * evaluated on the low pair and sets/clears condition-code bit "cc",
 * "condh" on the high pair and sets/clears bit "cc + 1".  Both
 * conditions are evaluated before update_fcr31() so their IEEE flags
 * are accounted for.  helper_cmpabs_ps_<op> first takes float32_abs()
 * of all four halves.
 */
#define FOP_COND_PS(op, condl, condh)                          \
void helper_cmp_ps_ ## op(CPUMIPSState *env, uint64_t fdt0,    \
                          uint64_t fdt1, int cc)               \
{                                                              \
    uint32_t fst0, fsth0, fst1, fsth1;                         \
    int ch, cl;                                                \
    fst0 = fdt0 & 0XFFFFFFFF;                                  \
    fsth0 = fdt0 >> 32;                                        \
    fst1 = fdt1 & 0XFFFFFFFF;                                  \
    fsth1 = fdt1 >> 32;                                        \
    cl = condl;                                                \
    ch = condh;                                                \
    update_fcr31(env, GETPC());                                \
    if (cl)                                                    \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
    if (ch)                                                    \
        SET_FP_COND(cc + 1, env->active_fpu);                  \
    else                                                       \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                \
}                                                              \
void helper_cmpabs_ps_ ## op(CPUMIPSState *env, uint64_t fdt0, \
                             uint64_t fdt1, int cc)            \
{                                                              \
    uint32_t fst0, fsth0, fst1, fsth1;                         \
    int ch, cl;                                                \
    fst0 = float32_abs(fdt0 & 0XFFFFFFFF);                     \
    fsth0 = float32_abs(fdt0 >> 32);                           \
    fst1 = float32_abs(fdt1 & 0XFFFFFFFF);                     \
    fsth1 = float32_abs(fdt1 >> 32);                           \
    cl = condl;                                                \
    ch = condh;                                                \
    update_fcr31(env, GETPC());                                \
    if (cl)                                                    \
        SET_FP_COND(cc, env->active_fpu);                      \
    else                                                       \
        CLEAR_FP_COND(cc, env->active_fpu);                    \
    if (ch)                                                    \
        SET_FP_COND(cc + 1, env->active_fpu);                  \
    else                                                       \
        CLEAR_FP_COND(cc + 1, env->active_fpu);                \
}

/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered_quiet() is still called. */
FOP_COND_PS(f, (float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status), 0),
            (float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(un, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(eq, float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
            float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ueq, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_eq_quiet(fst0, fst1, &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(olt, float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
            float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ult, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_lt_quiet(fst0, fst1, &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ole, float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
            float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ule, float32_unordered_quiet(fst1, fst0, &env->active_fpu.fp_status) || float32_le_quiet(fst0, fst1, &env->active_fpu.fp_status),
            float32_unordered_quiet(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le_quiet(fsth0, fsth1, &env->active_fpu.fp_status))
/* NOTE: the comma operator will make "cond" to eval to false,
 * but float32_unordered() is still called. */
FOP_COND_PS(sf, (float32_unordered(fst1, fst0, &env->active_fpu.fp_status), 0),
            (float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status), 0))
FOP_COND_PS(ngle,float32_unordered(fst1, fst0, &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status))
FOP_COND_PS(seq, float32_eq(fst0, fst1, &env->active_fpu.fp_status),
            float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngl, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_eq(fst0, fst1, &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_eq(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(lt, float32_lt(fst0, fst1, &env->active_fpu.fp_status),
            float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(nge, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_lt(fst0, fst1, &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_lt(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(le, float32_le(fst0, fst1, &env->active_fpu.fp_status),
            float32_le(fsth0, fsth1, &env->active_fpu.fp_status))
FOP_COND_PS(ngt, float32_unordered(fst1, fst0, &env->active_fpu.fp_status) || float32_le(fst0, fst1, &env->active_fpu.fp_status),
            float32_unordered(fsth1, fsth0, &env->active_fpu.fp_status) || float32_le(fsth0, fsth1, &env->active_fpu.fp_status))