/* translate.c — QEMU UniCore32 instruction translation (page-capture artifacts removed) */
  1. /*
  2. * UniCore32 translation
  3. *
  4. * Copyright (C) 2010-2012 Guan Xuetao
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation, or (at your option) any
  9. * later version. See the COPYING file in the top-level directory.
  10. */
  11. #include <stdarg.h>
  12. #include <stdlib.h>
  13. #include <stdio.h>
  14. #include <string.h>
  15. #include <inttypes.h>
  16. #include "cpu.h"
  17. #include "disas/disas.h"
  18. #include "tcg-op.h"
  19. #include "qemu/log.h"
  20. #include "exec/cpu_ldst.h"
  21. #include "exec/helper-proto.h"
  22. #include "exec/helper-gen.h"
  23. #include "trace-tcg.h"
  24. /* internal defines */
/* Per-instruction decoder state, threaded through the translator. */
typedef struct DisasContext {
    /* Address of the next instruction (already advanced past the one
       being translated — see load_reg_var()). */
    target_ulong pc;
    /* DISAS_* code describing how/whether the TB ends. */
    int is_jmp;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    TCGLabel *condlabel;
    struct TranslationBlock *tb;
    int singlestep_enabled;
#ifndef CONFIG_USER_ONLY
    /* Nonzero when translating user-mode code (see IS_USER). */
    int user;
#endif
} DisasContext;
  38. #ifndef CONFIG_USER_ONLY
  39. #define IS_USER(s) (s->user)
  40. #else
  41. #define IS_USER(s) 1
  42. #endif
  43. /* These instructions trap after executing, so defer them until after the
  44. conditional executions state has been updated. */
  45. #define DISAS_SYSCALL 5
  46. static TCGv_ptr cpu_env;
  47. static TCGv_i32 cpu_R[32];
  48. /* FIXME: These should be removed. */
  49. static TCGv cpu_F0s, cpu_F1s;
  50. static TCGv_i64 cpu_F0d, cpu_F1d;
  51. #include "exec/gen-icount.h"
/* Printable names for the 32 general registers; r31 doubles as the PC. */
static const char *regnames[] = {
    "r00", "r01", "r02", "r03", "r04", "r05", "r06", "r07",
    "r08", "r09", "r10", "r11", "r12", "r13", "r14", "r15",
    "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
    "r24", "r25", "r26", "r27", "r28", "r29", "r30", "pc" };
  57. /* initialize TCG globals. */
  58. void uc32_translate_init(void)
  59. {
  60. int i;
  61. cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
  62. for (i = 0; i < 32; i++) {
  63. cpu_R[i] = tcg_global_mem_new_i32(TCG_AREG0,
  64. offsetof(CPUUniCore32State, regs[i]), regnames[i]);
  65. }
  66. }
  67. static int num_temps;
  68. /* Allocate a temporary variable. */
  69. static TCGv_i32 new_tmp(void)
  70. {
  71. num_temps++;
  72. return tcg_temp_new_i32();
  73. }
  74. /* Release a temporary variable. */
  75. static void dead_tmp(TCGv tmp)
  76. {
  77. tcg_temp_free(tmp);
  78. num_temps--;
  79. }
  80. static inline TCGv load_cpu_offset(int offset)
  81. {
  82. TCGv tmp = new_tmp();
  83. tcg_gen_ld_i32(tmp, cpu_env, offset);
  84. return tmp;
  85. }
  86. #define load_cpu_field(name) load_cpu_offset(offsetof(CPUUniCore32State, name))
/* Store 'var' into the 32-bit env field at 'offset'; 'var' is freed. */
static inline void store_cpu_offset(TCGv var, int offset)
{
    tcg_gen_st_i32(var, cpu_env, offset);
    dead_tmp(var);
}
  92. #define store_cpu_field(var, name) \
  93. store_cpu_offset(var, offsetof(CPUUniCore32State, name))
  94. /* Set a variable to the value of a CPU register. */
  95. static void load_reg_var(DisasContext *s, TCGv var, int reg)
  96. {
  97. if (reg == 31) {
  98. uint32_t addr;
  99. /* normaly, since we updated PC */
  100. addr = (long)s->pc;
  101. tcg_gen_movi_i32(var, addr);
  102. } else {
  103. tcg_gen_mov_i32(var, cpu_R[reg]);
  104. }
  105. }
  106. /* Create a new temporary and set it to the value of a CPU register. */
  107. static inline TCGv load_reg(DisasContext *s, int reg)
  108. {
  109. TCGv tmp = new_tmp();
  110. load_reg_var(s, tmp, reg);
  111. return tmp;
  112. }
/* Set a CPU register.  The source must be a temporary and will be
   marked as dead.  A write to r31 (the PC) is forced to word alignment
   and terminates the TB with DISAS_JUMP. */
static void store_reg(DisasContext *s, int reg, TCGv var)
{
    if (reg == 31) {
        tcg_gen_andi_i32(var, var, ~3);
        s->is_jmp = DISAS_JUMP;
    }
    tcg_gen_mov_i32(cpu_R[reg], var);
    dead_tmp(var);
}
  124. /* Value extensions. */
  125. #define gen_uxtb(var) tcg_gen_ext8u_i32(var, var)
  126. #define gen_uxth(var) tcg_gen_ext16u_i32(var, var)
  127. #define gen_sxtb(var) tcg_gen_ext8s_i32(var, var)
  128. #define gen_sxth(var) tcg_gen_ext16s_i32(var, var)
  129. #define UCOP_REG_M (((insn) >> 0) & 0x1f)
  130. #define UCOP_REG_N (((insn) >> 19) & 0x1f)
  131. #define UCOP_REG_D (((insn) >> 14) & 0x1f)
  132. #define UCOP_REG_S (((insn) >> 9) & 0x1f)
  133. #define UCOP_REG_LO (((insn) >> 14) & 0x1f)
  134. #define UCOP_REG_HI (((insn) >> 9) & 0x1f)
  135. #define UCOP_SH_OP (((insn) >> 6) & 0x03)
  136. #define UCOP_SH_IM (((insn) >> 9) & 0x1f)
  137. #define UCOP_OPCODES (((insn) >> 25) & 0x0f)
  138. #define UCOP_IMM_9 (((insn) >> 0) & 0x1ff)
  139. #define UCOP_IMM10 (((insn) >> 0) & 0x3ff)
  140. #define UCOP_IMM14 (((insn) >> 0) & 0x3fff)
  141. #define UCOP_COND (((insn) >> 25) & 0x0f)
  142. #define UCOP_CMOV_COND (((insn) >> 19) & 0x0f)
  143. #define UCOP_CPNUM (((insn) >> 10) & 0x0f)
  144. #define UCOP_UCF64_FMT (((insn) >> 24) & 0x03)
  145. #define UCOP_UCF64_FUNC (((insn) >> 6) & 0x0f)
  146. #define UCOP_UCF64_COND (((insn) >> 6) & 0x0f)
  147. #define UCOP_SET(i) ((insn) & (1 << (i)))
  148. #define UCOP_SET_P UCOP_SET(28)
  149. #define UCOP_SET_U UCOP_SET(27)
  150. #define UCOP_SET_B UCOP_SET(26)
  151. #define UCOP_SET_W UCOP_SET(25)
  152. #define UCOP_SET_L UCOP_SET(24)
  153. #define UCOP_SET_S UCOP_SET(24)
  154. #define ILLEGAL cpu_abort(CPU(cpu), \
  155. "Illegal UniCore32 instruction %x at line %d!", \
  156. insn, __LINE__)
  157. #ifndef CONFIG_USER_ONLY
/* Translate a coprocessor-0 (system control) access.  The cp0 register
 * is selected by (UCOP_REG_N, UCOP_IMM10); the L bit chooses read vs
 * write, routed through the cp0 get/set helpers. */
static void disas_cp0_insn(CPUUniCore32State *env, DisasContext *s,
    uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);  /* needed by ILLEGAL */
    TCGv tmp, tmp2, tmp3;
    if ((insn & 0xfe000000) == 0xe0000000) {
        /* Pass the register selectors to the helper as immediates. */
        tmp2 = new_tmp();
        tmp3 = new_tmp();
        tcg_gen_movi_i32(tmp2, UCOP_REG_N);
        tcg_gen_movi_i32(tmp3, UCOP_IMM10);
        if (UCOP_SET_L) {
            /* read cp0 register into rd */
            tmp = new_tmp();
            gen_helper_cp0_get(tmp, cpu_env, tmp2, tmp3);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* write rd into cp0 register */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp0_set(cpu_env, tmp, tmp2, tmp3);
            dead_tmp(tmp);
        }
        dead_tmp(tmp2);
        dead_tmp(tmp3);
        return;
    }
    ILLEGAL;
}
/* Translate coprocessor-1 (OCD) accesses: reads return 0, writes send
 * a character through the cp1 putc helper (debug console output). */
static void disas_ocd_insn(CPUUniCore32State *env, DisasContext *s,
    uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);  /* needed by ILLEGAL */
    TCGv tmp;
    if ((insn & 0xff003fff) == 0xe1000400) {
        /*
         * movc rd, pp.nn, #imm9
         * rd: UCOP_REG_D
         * nn: UCOP_REG_N (must be 0)
         * imm9: 0
         */
        if (UCOP_REG_N == 0) {
            /* Reads from this register always yield 0. */
            tmp = new_tmp();
            tcg_gen_movi_i32(tmp, 0);
            store_reg(s, UCOP_REG_D, tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    if ((insn & 0xff003fff) == 0xe0000401) {
        /*
         * movc pp.nn, rn, #imm9
         * rn: UCOP_REG_D
         * nn: UCOP_REG_N (must be 1)
         * imm9: 1
         */
        if (UCOP_REG_N == 1) {
            /* Emit the low byte of rn via the cp1 putc helper. */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_cp1_putc(tmp);
            dead_tmp(tmp);
            return;
        } else {
            ILLEGAL;
        }
    }
    ILLEGAL;
}
  222. #endif
  223. static inline void gen_set_asr(TCGv var, uint32_t mask)
  224. {
  225. TCGv tmp_mask = tcg_const_i32(mask);
  226. gen_helper_asr_write(cpu_env, var, tmp_mask);
  227. tcg_temp_free_i32(tmp_mask);
  228. }
  229. /* Set NZCV flags from the high 4 bits of var. */
  230. #define gen_set_nzcv(var) gen_set_asr(var, ASR_NZCV)
  231. static void gen_exception(int excp)
  232. {
  233. TCGv tmp = new_tmp();
  234. tcg_gen_movi_i32(tmp, excp);
  235. gen_helper_exception(cpu_env, tmp);
  236. dead_tmp(tmp);
  237. }
  238. #define gen_set_CF(var) tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, CF))
  239. /* Set CF to the top bit of var. */
  240. static void gen_set_CF_bit31(TCGv var)
  241. {
  242. TCGv tmp = new_tmp();
  243. tcg_gen_shri_i32(tmp, var, 31);
  244. gen_set_CF(tmp);
  245. dead_tmp(tmp);
  246. }
  247. /* Set N and Z flags from var. */
  248. static inline void gen_logic_CC(TCGv var)
  249. {
  250. tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, NF));
  251. tcg_gen_st_i32(var, cpu_env, offsetof(CPUUniCore32State, ZF));
  252. }
  253. /* dest = T0 + T1 + CF. */
  254. static void gen_add_carry(TCGv dest, TCGv t0, TCGv t1)
  255. {
  256. TCGv tmp;
  257. tcg_gen_add_i32(dest, t0, t1);
  258. tmp = load_cpu_field(CF);
  259. tcg_gen_add_i32(dest, dest, tmp);
  260. dead_tmp(tmp);
  261. }
  262. /* dest = T0 - T1 + CF - 1. */
  263. static void gen_sub_carry(TCGv dest, TCGv t0, TCGv t1)
  264. {
  265. TCGv tmp;
  266. tcg_gen_sub_i32(dest, t0, t1);
  267. tmp = load_cpu_field(CF);
  268. tcg_gen_add_i32(dest, dest, tmp);
  269. tcg_gen_subi_i32(dest, dest, 1);
  270. dead_tmp(tmp);
  271. }
  272. static void shifter_out_im(TCGv var, int shift)
  273. {
  274. TCGv tmp = new_tmp();
  275. if (shift == 0) {
  276. tcg_gen_andi_i32(tmp, var, 1);
  277. } else {
  278. tcg_gen_shri_i32(tmp, var, shift);
  279. if (shift != 31) {
  280. tcg_gen_andi_i32(tmp, tmp, 1);
  281. }
  282. }
  283. gen_set_CF(tmp);
  284. dead_tmp(tmp);
  285. }
  286. /* Shift by immediate. Includes special handling for shift == 0. */
  287. static inline void gen_uc32_shift_im(TCGv var, int shiftop, int shift,
  288. int flags)
  289. {
  290. switch (shiftop) {
  291. case 0: /* LSL */
  292. if (shift != 0) {
  293. if (flags) {
  294. shifter_out_im(var, 32 - shift);
  295. }
  296. tcg_gen_shli_i32(var, var, shift);
  297. }
  298. break;
  299. case 1: /* LSR */
  300. if (shift == 0) {
  301. if (flags) {
  302. tcg_gen_shri_i32(var, var, 31);
  303. gen_set_CF(var);
  304. }
  305. tcg_gen_movi_i32(var, 0);
  306. } else {
  307. if (flags) {
  308. shifter_out_im(var, shift - 1);
  309. }
  310. tcg_gen_shri_i32(var, var, shift);
  311. }
  312. break;
  313. case 2: /* ASR */
  314. if (shift == 0) {
  315. shift = 32;
  316. }
  317. if (flags) {
  318. shifter_out_im(var, shift - 1);
  319. }
  320. if (shift == 32) {
  321. shift = 31;
  322. }
  323. tcg_gen_sari_i32(var, var, shift);
  324. break;
  325. case 3: /* ROR/RRX */
  326. if (shift != 0) {
  327. if (flags) {
  328. shifter_out_im(var, shift - 1);
  329. }
  330. tcg_gen_rotri_i32(var, var, shift); break;
  331. } else {
  332. TCGv tmp = load_cpu_field(CF);
  333. if (flags) {
  334. shifter_out_im(var, 0);
  335. }
  336. tcg_gen_shri_i32(var, var, 1);
  337. tcg_gen_shli_i32(tmp, tmp, 31);
  338. tcg_gen_or_i32(var, var, tmp);
  339. dead_tmp(tmp);
  340. }
  341. }
  342. };
  343. static inline void gen_uc32_shift_reg(TCGv var, int shiftop,
  344. TCGv shift, int flags)
  345. {
  346. if (flags) {
  347. switch (shiftop) {
  348. case 0:
  349. gen_helper_shl_cc(var, cpu_env, var, shift);
  350. break;
  351. case 1:
  352. gen_helper_shr_cc(var, cpu_env, var, shift);
  353. break;
  354. case 2:
  355. gen_helper_sar_cc(var, cpu_env, var, shift);
  356. break;
  357. case 3:
  358. gen_helper_ror_cc(var, cpu_env, var, shift);
  359. break;
  360. }
  361. } else {
  362. switch (shiftop) {
  363. case 0:
  364. gen_helper_shl(var, var, shift);
  365. break;
  366. case 1:
  367. gen_helper_shr(var, var, shift);
  368. break;
  369. case 2:
  370. gen_helper_sar(var, var, shift);
  371. break;
  372. case 3:
  373. tcg_gen_andi_i32(shift, shift, 0x1f);
  374. tcg_gen_rotr_i32(var, var, shift);
  375. break;
  376. }
  377. }
  378. dead_tmp(shift);
  379. }
/* Emit a branch to 'label' taken when condition code 'cc' holds.
 * Flag fields in env are raw words: NF/VF carry the relevant bit in
 * bit 31 (hence signed compares against 0), ZF is zero iff Z is set,
 * and CF is 0 or 1. */
static void gen_test_cc(int cc, TCGLabel *label)
{
    TCGv tmp;
    TCGv tmp2;
    TCGLabel *inv;      /* local skip label for compound conditions */
    switch (cc) {
    case 0: /* eq: Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 1: /* ne: !Z */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 2: /* cs: C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        break;
    case 3: /* cc: !C */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 4: /* mi: N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 5: /* pl: !N */
        tmp = load_cpu_field(NF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 6: /* vs: V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 7: /* vc: !V */
        tmp = load_cpu_field(VF);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 8: /* hi: C && !Z */
        /* Fall through to 'inv' (condition false) when C is clear. */
        inv = gen_new_label();
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_NE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 9: /* ls: !C || Z */
        tmp = load_cpu_field(CF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        break;
    case 10: /* ge: N == V -> N ^ V == 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        break;
    case 11: /* lt: N != V -> N ^ V != 0 */
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    case 12: /* gt: !Z && N == V */
        inv = gen_new_label();
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, inv);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_GE, tmp, 0, label);
        gen_set_label(inv);
        break;
    case 13: /* le: Z || N != V */
        tmp = load_cpu_field(ZF);
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        dead_tmp(tmp);
        tmp = load_cpu_field(VF);
        tmp2 = load_cpu_field(NF);
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        tcg_gen_brcondi_i32(TCG_COND_LT, tmp, 0, label);
        break;
    default:
        fprintf(stderr, "Bad condition code 0x%x\n", cc);
        abort();
    }
    /* Free the flag temporary still live from the branch above. */
    dead_tmp(tmp);
}
/* Indexed by UCOP_OPCODES: 1 for logical operations, 0 for arithmetic
   ones (used to pick the flag-setting style — NOTE(review): usage is
   outside this chunk; confirm against the data-processing decoder). */
static const uint8_t table_logic_cc[16] = {
    1, /* and */ 1, /* xor */ 0, /* sub */ 0, /* rsb */
    0, /* add */ 0, /* adc */ 0, /* sbc */ 0, /* rsc */
    1, /* andl */ 1, /* xorl */ 0, /* cmp */ 0, /* cmn */
    1, /* orr */ 1, /* mov */ 1, /* bic */ 1, /* mvn */
};
  482. /* Set PC state from an immediate address. */
  483. static inline void gen_bx_im(DisasContext *s, uint32_t addr)
  484. {
  485. s->is_jmp = DISAS_UPDATE;
  486. tcg_gen_movi_i32(cpu_R[31], addr & ~3);
  487. }
  488. /* Set PC state from var. var is marked as dead. */
  489. static inline void gen_bx(DisasContext *s, TCGv var)
  490. {
  491. s->is_jmp = DISAS_UPDATE;
  492. tcg_gen_andi_i32(cpu_R[31], var, ~3);
  493. dead_tmp(var);
  494. }
/* Thin alias of store_reg(); kept as a separate entry point for call
   sites that conceptually perform a branch-exchange style write. */
static inline void store_reg_bx(DisasContext *s, int reg, TCGv var)
{
    store_reg(s, reg, var);
}
  499. static inline TCGv gen_ld8s(TCGv addr, int index)
  500. {
  501. TCGv tmp = new_tmp();
  502. tcg_gen_qemu_ld8s(tmp, addr, index);
  503. return tmp;
  504. }
  505. static inline TCGv gen_ld8u(TCGv addr, int index)
  506. {
  507. TCGv tmp = new_tmp();
  508. tcg_gen_qemu_ld8u(tmp, addr, index);
  509. return tmp;
  510. }
  511. static inline TCGv gen_ld16s(TCGv addr, int index)
  512. {
  513. TCGv tmp = new_tmp();
  514. tcg_gen_qemu_ld16s(tmp, addr, index);
  515. return tmp;
  516. }
  517. static inline TCGv gen_ld16u(TCGv addr, int index)
  518. {
  519. TCGv tmp = new_tmp();
  520. tcg_gen_qemu_ld16u(tmp, addr, index);
  521. return tmp;
  522. }
  523. static inline TCGv gen_ld32(TCGv addr, int index)
  524. {
  525. TCGv tmp = new_tmp();
  526. tcg_gen_qemu_ld32u(tmp, addr, index);
  527. return tmp;
  528. }
/* Store the low byte of 'val' to 'addr' (access index 'index');
   'val' is freed. */
static inline void gen_st8(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st8(val, addr, index);
    dead_tmp(val);
}
/* Store the low halfword of 'val' to 'addr' (access index 'index');
   'val' is freed. */
static inline void gen_st16(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st16(val, addr, index);
    dead_tmp(val);
}
/* Store the 32-bit 'val' to 'addr' (access index 'index'); 'val' is
   freed. */
static inline void gen_st32(TCGv val, TCGv addr, int index)
{
    tcg_gen_qemu_st32(val, addr, index);
    dead_tmp(val);
}
/* Set the PC (r31) to an immediate value. */
static inline void gen_set_pc_im(uint32_t val)
{
    tcg_gen_movi_i32(cpu_R[31], val);
}
  548. /* Force a TB lookup after an instruction that changes the CPU state. */
  549. static inline void gen_lookup_tb(DisasContext *s)
  550. {
  551. tcg_gen_movi_i32(cpu_R[31], s->pc & ~1);
  552. s->is_jmp = DISAS_UPDATE;
  553. }
  554. static inline void gen_add_data_offset(DisasContext *s, unsigned int insn,
  555. TCGv var)
  556. {
  557. int val;
  558. TCGv offset;
  559. if (UCOP_SET(29)) {
  560. /* immediate */
  561. val = UCOP_IMM14;
  562. if (!UCOP_SET_U) {
  563. val = -val;
  564. }
  565. if (val != 0) {
  566. tcg_gen_addi_i32(var, var, val);
  567. }
  568. } else {
  569. /* shift/register */
  570. offset = load_reg(s, UCOP_REG_M);
  571. gen_uc32_shift_im(offset, UCOP_SH_OP, UCOP_SH_IM, 0);
  572. if (!UCOP_SET_U) {
  573. tcg_gen_sub_i32(var, var, offset);
  574. } else {
  575. tcg_gen_add_i32(var, var, offset);
  576. }
  577. dead_tmp(offset);
  578. }
  579. }
  580. static inline void gen_add_datah_offset(DisasContext *s, unsigned int insn,
  581. TCGv var)
  582. {
  583. int val;
  584. TCGv offset;
  585. if (UCOP_SET(26)) {
  586. /* immediate */
  587. val = (insn & 0x1f) | ((insn >> 4) & 0x3e0);
  588. if (!UCOP_SET_U) {
  589. val = -val;
  590. }
  591. if (val != 0) {
  592. tcg_gen_addi_i32(var, var, val);
  593. }
  594. } else {
  595. /* register */
  596. offset = load_reg(s, UCOP_REG_M);
  597. if (!UCOP_SET_U) {
  598. tcg_gen_sub_i32(var, var, offset);
  599. } else {
  600. tcg_gen_add_i32(var, var, offset);
  601. }
  602. dead_tmp(offset);
  603. }
  604. }
  605. static inline long ucf64_reg_offset(int reg)
  606. {
  607. if (reg & 1) {
  608. return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
  609. + offsetof(CPU_DoubleU, l.upper);
  610. } else {
  611. return offsetof(CPUUniCore32State, ucf64.regs[reg >> 1])
  612. + offsetof(CPU_DoubleU, l.lower);
  613. }
  614. }
  615. #define ucf64_gen_ld32(reg) load_cpu_offset(ucf64_reg_offset(reg))
  616. #define ucf64_gen_st32(var, reg) store_cpu_offset(var, ucf64_reg_offset(reg))
/* UniCore-F64 single load/store with immediate offset.  The P/U/W bits
 * select pre/post indexing, offset sign, and base write-back. */
static void do_ucf64_ldst_i(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);  /* needed by ILLEGAL */
    int offset;
    TCGv tmp;
    TCGv addr;
    addr = load_reg(s, UCOP_REG_N);
    /* P=0 with W=0 is an unallocated addressing form. */
    if (!UCOP_SET_P && !UCOP_SET_W) {
        ILLEGAL;
    }
    if (UCOP_SET_P) {
        /* pre-indexed: apply the word-scaled offset before the access */
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_L) { /* load */
        tmp = gen_ld32(addr, IS_USER(s));
        ucf64_gen_st32(tmp, UCOP_REG_D);
    } else { /* store */
        tmp = ucf64_gen_ld32(UCOP_REG_D);
        gen_st32(tmp, addr, IS_USER(s));
    }
    if (!UCOP_SET_P) {
        /* post-indexed: apply the offset after the access */
        offset = UCOP_IMM10 << 2;
        if (!UCOP_SET_U) {
            offset = -offset;
        }
        if (offset != 0) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
    }
    if (UCOP_SET_W) {
        /* write the updated address back to the base register */
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 load/store multiple words.  The low 8 bits of insn are a
 * register mask within the bank selected by bits [9:8]; P/U choose the
 * addressing direction and W requests base write-back. */
static void do_ucf64_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);  /* needed by ILLEGAL */
    unsigned int i;
    int j, n, freg;
    TCGv tmp;
    TCGv addr;
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }
    if (UCOP_REG_N == 31) {
        /* the base register must not be the PC */
        ILLEGAL;
    }
    if ((insn << 24) == 0) {
        /* empty register mask (low 8 bits all clear) */
        ILLEGAL;
    }
    addr = load_reg(s, UCOP_REG_N);
    /* n = number of registers selected in the mask */
    n = 0;
    for (i = 0; i < 8; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    /* Position 'addr' at the first word to transfer. */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) { /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } /* unnecessary to do anything when post increment */
    } else {
        if (UCOP_SET_P) { /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else { /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }
    freg = ((insn >> 8) & 3) << 3; /* freg should be 0, 8, 16, 24 */
    /* Transfer each selected register, advancing addr by 4 between
       transfers. */
    for (i = 0, j = 0; i < 8; i++, freg++) {
        if (!UCOP_SET(i)) {
            continue;
        }
        if (UCOP_SET_L) { /* load */
            tmp = gen_ld32(addr, IS_USER(s));
            ucf64_gen_st32(tmp, freg);
        } else { /* store */
            tmp = ucf64_gen_ld32(freg);
            gen_st32(tmp, addr, IS_USER(s));
        }
        j++;
        /* unnecessary to add after the last transfer */
        if (j != n) {
            tcg_gen_addi_i32(addr, addr, 4);
        }
    }
    if (UCOP_SET_W) { /* write back */
        /* Adjust addr to the final base value before writing it back. */
        if (UCOP_SET_U) {
            if (!UCOP_SET_P) { /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            } /* unnecessary to do anything when pre increment */
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }
}
/* UniCore-F64 mrc/mcr: transfers between general registers and the
 * UCF64 coprocessor (FPSCR access, register moves, and compares). */
static void do_ucf64_trans(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);  /* needed by ILLEGAL */
    TCGv tmp;
    if ((insn & 0xfe0003ff) == 0xe2000000) {
        /* control register: only the FPSCR is accessible, rd != pc */
        if ((UCOP_REG_N != UC32_UCF64_FPSCR) || (UCOP_REG_D == 31)) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) {
            /* CFF: copy FPSCR into rd */
            tmp = new_tmp();
            gen_helper_ucf64_get_fpscr(tmp, cpu_env);
            store_reg(s, UCOP_REG_D, tmp);
        } else {
            /* CTF: copy rd into FPSCR, then force a TB lookup since
               FPSCR contents may affect translation */
            tmp = load_reg(s, UCOP_REG_D);
            gen_helper_ucf64_set_fpscr(cpu_env, tmp);
            dead_tmp(tmp);
            gen_lookup_tb(s);
        }
        return;
    }
    if ((insn & 0xfe0003ff) == 0xe0000000) {
        /* move between a general register and a UCF64 register */
        if (UCOP_REG_D == 31) {
            ILLEGAL;
        }
        if (UCOP_SET(24)) { /* MFF: move from FPU register */
            tmp = ucf64_gen_ld32(UCOP_REG_N);
            store_reg(s, UCOP_REG_D, tmp);
        } else { /* MTF: move to FPU register */
            tmp = load_reg(s, UCOP_REG_D);
            ucf64_gen_st32(tmp, UCOP_REG_N);
        }
        return;
    }
    if ((insn & 0xfb000000) == 0xe9000000) {
        /* MFFC: floating-point compare; condition in bits [9:6] */
        if (UCOP_REG_D != 31) {
            ILLEGAL;
        }
        if (UCOP_UCF64_COND & 0x8) {
            /* only condition codes 0..7 are defined */
            ILLEGAL;
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, UCOP_UCF64_COND);
        if (UCOP_SET(26)) {
            /* double-precision compare */
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, tmp, cpu_env);
        } else {
            /* single-precision compare */
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
            tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, tmp, cpu_env);
        }
        dead_tmp(tmp);
        return;
    }
    ILLEGAL;
}
/* UniCore-F64 convert instructions: cvt.s / cvt.d / cvt.w convert the
 * *source* format UCOP_UCF64_FMT (0 = single, 1 = double, 2 = 32-bit
 * integer word) into the format named by the function code; converting
 * a format to itself is illegal. */
static void do_ucf64_fcvt(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);  /* needed by ILLEGAL */
    if (UCOP_UCF64_FMT == 3) {
        /* reserved format encoding */
        ILLEGAL;
    }
    if (UCOP_REG_N != 0) {
        /* rn must be zero for conversions */
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* cvt.s */
        switch (UCOP_UCF64_FMT) {
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2sf(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2sf(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* s */:
            ILLEGAL;
            break;
        }
        break;
    case 1: /* cvt.d */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 2 /* w */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_si2df(cpu_F0d, cpu_F0s, cpu_env);
            tcg_gen_st_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* d */:
            ILLEGAL;
            break;
        }
        break;
    case 4: /* cvt.w */
        switch (UCOP_UCF64_FMT) {
        case 0 /* s */:
            tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_sf2si(cpu_F0s, cpu_F0s, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        case 1 /* d */:
            tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
            gen_helper_ucf64_df2si(cpu_F0s, cpu_F0d, cpu_env);
            tcg_gen_st_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_D));
            break;
        default /* w */:
            ILLEGAL;
            break;
        }
        break;
    default:
        ILLEGAL;
    }
}
/* UniCore-F64 compare instructions.
 *
 * NOTE(review): the unconditional ILLEGAL below means this path is never
 * actually translated; the load sequence after it is dead code kept as a
 * template.  The commented-out helper calls take no condition argument,
 * unlike the MFFC path above which passes a cond temp to
 * gen_helper_ucf64_cmpd/cmps -- presumably this legacy encoding was never
 * finished (see the TODO).
 */
static void do_ucf64_fcmp(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_SET(25)) {
        ILLEGAL;
    }
    if (UCOP_REG_D != 0) {
        ILLEGAL;
    }

    ILLEGAL; /* TODO */
    if (UCOP_SET(24)) {
        tcg_gen_ld_i64(cpu_F0d, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i64(cpu_F1d, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmpd(cpu_F0d, cpu_F1d, cpu_env); */
    } else {
        tcg_gen_ld_i32(cpu_F0s, cpu_env, ucf64_reg_offset(UCOP_REG_N));
        tcg_gen_ld_i32(cpu_F1s, cpu_env, ucf64_reg_offset(UCOP_REG_M));
        /* gen_helper_ucf64_cmps(cpu_F0s, cpu_F1s, cpu_env); */
    }
}
/* There are no real "mov" helpers: with these no-op stubs, UCF64_OP1(mov)
 * degenerates into a plain load of freg[M] into cpu_F0s/cpu_F0d followed by
 * a store to freg[D], i.e. a register-to-register copy. */
#define gen_helper_ucf64_movs(x, y) do { } while (0)
#define gen_helper_ucf64_movd(x, y) do { } while (0)
/* Emit a one-operand UCF64 data-processing op:
 *   freg[UCOP_REG_D] = gen_helper_ucf64_<name>{s,d}(freg[UCOP_REG_M])
 * UCOP_UCF64_FMT selects single (0) or double (1); word format (2) is
 * illegal, and FMT == 3 is rejected by the caller (do_ucf64_datap).
 * UCOP_REG_N is unused in this encoding and must be zero. */
#define UCF64_OP1(name)    do {                           \
        if (UCOP_REG_N != 0) {                            \
            ILLEGAL;                                      \
        }                                                 \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s, cpu_F0s); \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d, cpu_F0d); \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
/* Emit a two-operand UCF64 data-processing op:
 *   freg[UCOP_REG_D] =
 *       gen_helper_ucf64_<name>{s,d}(freg[UCOP_REG_N], freg[UCOP_REG_M])
 * UCOP_UCF64_FMT selects single (0) or double (1); word format (2) is
 * illegal, and FMT == 3 is rejected by the caller (do_ucf64_datap).
 * The helpers take cpu_env so they can use/update UCF64 FP status. */
#define UCF64_OP2(name)    do {                           \
        switch (UCOP_UCF64_FMT) {                         \
        case 0 /* s */:                                   \
            tcg_gen_ld_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i32(cpu_F1s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##s(cpu_F0s,           \
                           cpu_F0s, cpu_F1s, cpu_env);    \
            tcg_gen_st_i32(cpu_F0s, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 1 /* d */:                                   \
            tcg_gen_ld_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_N)); \
            tcg_gen_ld_i64(cpu_F1d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_M)); \
            gen_helper_ucf64_##name##d(cpu_F0d,           \
                           cpu_F0d, cpu_F1d, cpu_env);    \
            tcg_gen_st_i64(cpu_F0d, cpu_env,              \
                           ucf64_reg_offset(UCOP_REG_D)); \
            break;                                        \
        case 2 /* w */:                                   \
            ILLEGAL;                                      \
            break;                                        \
        }                                                 \
    } while (0)
/* UniCore-F64 data processing.
 *
 * Dispatches on UCOP_UCF64_FUNC to the arithmetic macros above:
 * two-operand ops (add/sub/mul/div) via UCF64_OP2, one-operand ops
 * (abs/mov/neg) via UCF64_OP1.  FMT == 3 is reserved, so the macros
 * only have to handle FMT 0..2 themselves.
 */
static void do_ucf64_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (UCOP_UCF64_FMT == 3) {
        ILLEGAL;
    }
    switch (UCOP_UCF64_FUNC) {
    case 0: /* add */
        UCF64_OP2(add);
        break;
    case 1: /* sub */
        UCF64_OP2(sub);
        break;
    case 2: /* mul */
        UCF64_OP2(mul);
        break;
    case 4: /* div */
        UCF64_OP2(div);
        break;
    case 5: /* abs */
        UCF64_OP1(abs);
        break;
    case 6: /* mov */
        UCF64_OP1(mov);
        break;
    case 7: /* neg */
        UCF64_OP1(neg);
        break;
    default:
        ILLEGAL;
    }
}
/* Disassemble an F64 instruction.
 *
 * Top-level UCF64 decode:
 *   bit 29 clear -> load/store (bit 26 selects multiple vs. immediate form)
 *   bit 29 set, bit 5 set -> arithmetic, sub-decoded on bits 27:26
 *   bit 29 set, bit 5 clear -> CPU<->FPU transfer instructions
 */
static void disas_ucf64_insn(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    if (!UCOP_SET(29)) {
        if (UCOP_SET(26)) {
            do_ucf64_ldst_m(env, s, insn);
        } else {
            do_ucf64_ldst_i(env, s, insn);
        }
    } else {
        if (UCOP_SET(5)) {
            switch ((insn >> 26) & 0x3) {
            case 0:
                do_ucf64_datap(env, s, insn);
                break;
            case 1:
                ILLEGAL;
                break;
            case 2:
                do_ucf64_fcvt(env, s, insn);
                break;
            case 3:
                do_ucf64_fcmp(env, s, insn);
                break;
            }
        } else {
            do_ucf64_trans(env, s, insn);
        }
    }
}
  1001. static inline void gen_goto_tb(DisasContext *s, int n, uint32_t dest)
  1002. {
  1003. TranslationBlock *tb;
  1004. tb = s->tb;
  1005. if ((tb->pc & TARGET_PAGE_MASK) == (dest & TARGET_PAGE_MASK)) {
  1006. tcg_gen_goto_tb(n);
  1007. gen_set_pc_im(dest);
  1008. tcg_gen_exit_tb((uintptr_t)tb + n);
  1009. } else {
  1010. gen_set_pc_im(dest);
  1011. tcg_gen_exit_tb(0);
  1012. }
  1013. }
  1014. static inline void gen_jmp(DisasContext *s, uint32_t dest)
  1015. {
  1016. if (unlikely(s->singlestep_enabled)) {
  1017. /* An indirect jump so that we still trigger the debug exception. */
  1018. gen_bx_im(s, dest);
  1019. } else {
  1020. gen_goto_tb(s, 0, dest);
  1021. s->is_jmp = DISAS_TB_JUMP;
  1022. }
  1023. }
/* Write the bits selected by MASK from t0 into the ASR (bsr == 0) or the
 * banked BSR (bsr != 0).  Returns nonzero if access to the PSR is not
 * permitted (BSR access from user mode).  Marks t0 as dead on success;
 * note t0 is NOT freed on the failure path -- callers treat that case as
 * an illegal instruction. */
static int gen_set_psr(DisasContext *s, uint32_t mask, int bsr, TCGv t0)
{
    TCGv tmp;

    if (bsr) {
        /* ??? This is also undefined in system mode. */
        if (IS_USER(s)) {
            return 1;
        }
        /* Merge the masked bits of t0 into the saved BSR. */
        tmp = load_cpu_field(bsr);
        tcg_gen_andi_i32(tmp, tmp, ~mask);
        tcg_gen_andi_i32(t0, t0, mask);
        tcg_gen_or_i32(tmp, tmp, t0);
        store_cpu_field(tmp, bsr);
    } else {
        gen_set_asr(t0, mask);
    }
    dead_tmp(t0);
    /* ASR/BSR changes can affect translation (e.g. mode), so end the TB. */
    gen_lookup_tb(s);
    return 0;
}
/* Generate an old-style exception return: write the return address to r31
 * and restore the whole ASR from the banked BSR.  Marks pc as dead. */
static void gen_exception_return(DisasContext *s, TCGv pc)
{
    TCGv tmp;

    store_reg(s, 31, pc);
    tmp = load_cpu_field(bsr);
    gen_set_asr(tmp, 0xffffffff);
    dead_tmp(tmp);
    /* CPU state changed; force a TB lookup for the next instruction. */
    s->is_jmp = DISAS_UPDATE;
}
/* Dispatch a coprocessor instruction by coprocessor number:
 *   cp0 = system control, cp1 = OCD (system emulation only),
 *   cp2 = UniCore-F64 FPU.  Anything else aborts. */
static void disas_coproc_insn(CPUUniCore32State *env, DisasContext *s,
        uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);

    switch (UCOP_CPNUM) {
#ifndef CONFIG_USER_ONLY
    case 0:
        disas_cp0_insn(env, s, insn);
        break;
    case 1:
        disas_ocd_insn(env, s, insn);
        break;
#endif
    case 2:
        disas_ucf64_insn(env, s, insn);
        break;
    default:
        /* Unknown coprocessor. */
        cpu_abort(CPU(cpu), "Unknown coprocessor!");
    }
}
/* data processing instructions
 *
 * Operand 2 is either a rotated 9-bit immediate (bit 29 set) or a shifted
 * register.  Opcodes 0x08-0x0b only update the condition flags; 0x0d/0x0f
 * are single-operand (no rN) and may carry a CMOV condition in bit 23.
 * Writing r31 with S set (SUBS/MOVS pc, ...) performs an exception return.
 */
static void do_datap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv tmp;
    TCGv tmp2;
    int logic_cc;

    if (UCOP_OPCODES == 0x0f || UCOP_OPCODES == 0x0d) {
        if (UCOP_SET(23)) { /* CMOV instructions */
            if ((UCOP_CMOV_COND == 0xe) || (UCOP_CMOV_COND == 0xf)) {
                ILLEGAL;
            }
            /* if not always execute, we generate a conditional jump to
               next instruction */
            s->condlabel = gen_new_label();
            gen_test_cc(UCOP_CMOV_COND ^ 1, s->condlabel);
            s->condjmp = 1;
        }
    }

    /* Logical ops update NZ (and shifter carry) only when S is set. */
    logic_cc = table_logic_cc[UCOP_OPCODES] & (UCOP_SET_S >> 24);

    if (UCOP_SET(29)) {
        unsigned int val;
        /* immediate operand: 9-bit value rotated right by UCOP_SH_IM */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp2 = new_tmp();
        tcg_gen_movi_i32(tmp2, val);
        if (logic_cc && UCOP_SH_IM) {
            /* rotated immediates feed bit 31 into the carry flag */
            gen_set_CF_bit31(tmp2);
        }
    } else {
        /* register operand, shifted by register (bit 5) or immediate */
        tmp2 = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(5)) {
            tmp = load_reg(s, UCOP_REG_S);
            gen_uc32_shift_reg(tmp2, UCOP_SH_OP, tmp, logic_cc);
        } else {
            gen_uc32_shift_im(tmp2, UCOP_SH_OP, UCOP_SH_IM, logic_cc);
        }
    }

    /* Single-operand opcodes (mov/not) have no first operand. */
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        tmp = load_reg(s, UCOP_REG_N);
    } else {
        TCGV_UNUSED(tmp);
    }

    switch (UCOP_OPCODES) {
    case 0x00: /* and */
        tcg_gen_and_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x01: /* xor */
        tcg_gen_xor_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x02: /* sub */
        if (UCOP_SET_S && UCOP_REG_D == 31) {
            /* SUBS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            gen_exception_return(s, tmp);
        } else {
            if (UCOP_SET_S) {
                gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
            } else {
                tcg_gen_sub_i32(tmp, tmp, tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp);
        }
        break;
    case 0x03: /* reversed sub: rD = op2 - rN */
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            tcg_gen_sub_i32(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x04: /* add */
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            tcg_gen_add_i32(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x05: /* add with carry */
        if (UCOP_SET_S) {
            gen_helper_adc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_add_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x06: /* sub with carry */
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp, tmp2);
        } else {
            gen_sub_carry(tmp, tmp, tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x07: /* reversed sub with carry */
        if (UCOP_SET_S) {
            gen_helper_sbc_cc(tmp, cpu_env, tmp2, tmp);
        } else {
            gen_sub_carry(tmp, tmp2, tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x08: /* and, flags only (test) */
        if (UCOP_SET_S) {
            tcg_gen_and_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x09: /* xor, flags only */
        if (UCOP_SET_S) {
            tcg_gen_xor_i32(tmp, tmp, tmp2);
            gen_logic_CC(tmp);
        }
        dead_tmp(tmp);
        break;
    case 0x0a: /* sub, flags only (compare) */
        if (UCOP_SET_S) {
            gen_helper_sub_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0b: /* add, flags only (compare negated) */
        if (UCOP_SET_S) {
            gen_helper_add_cc(tmp, cpu_env, tmp, tmp2);
        }
        dead_tmp(tmp);
        break;
    case 0x0c: /* or */
        tcg_gen_or_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    case 0x0d: /* mov */
        if (logic_cc && UCOP_REG_D == 31) {
            /* MOVS r31, ... is used for exception return. */
            if (IS_USER(s)) {
                ILLEGAL;
            }
            gen_exception_return(s, tmp2);
        } else {
            if (logic_cc) {
                gen_logic_CC(tmp2);
            }
            store_reg_bx(s, UCOP_REG_D, tmp2);
        }
        break;
    case 0x0e: /* and-not (bit clear) */
        tcg_gen_andc_i32(tmp, tmp, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp);
        }
        store_reg_bx(s, UCOP_REG_D, tmp);
        break;
    default:
    case 0x0f: /* move inverted */
        tcg_gen_not_i32(tmp2, tmp2);
        if (logic_cc) {
            gen_logic_CC(tmp2);
        }
        store_reg_bx(s, UCOP_REG_D, tmp2);
        break;
    }
    /* tmp2 was consumed by store_reg_bx in the mov/not cases. */
    if (UCOP_OPCODES != 0x0f && UCOP_OPCODES != 0x0d) {
        dead_tmp(tmp2);
    }
}
/* multiply
 *
 * Bit 27 set: 64-bit multiply producing a {REG_HI, REG_LO} pair, signed
 * when bit 26 is set, with optional 64-bit accumulate (bit 25).
 * Bit 27 clear: 32-bit multiply into REG_D, with optional add of REG_S
 * (bit 25) and optional flag update (S bit).
 */
static void do_mult(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    TCGv tmp, tmp2, tmp3, tmp4;

    if (UCOP_SET(27)) {
        /* 64 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        if (UCOP_SET(26)) {
            /* signed */
            tcg_gen_muls2_i32(tmp, tmp2, tmp, tmp2);
        } else {
            /* unsigned */
            tcg_gen_mulu2_i32(tmp, tmp2, tmp, tmp2);
        }
        if (UCOP_SET(25)) { /* mult accumulate */
            tmp3 = load_reg(s, UCOP_REG_LO);
            tmp4 = load_reg(s, UCOP_REG_HI);
            tcg_gen_add2_i32(tmp, tmp2, tmp, tmp2, tmp3, tmp4);
            dead_tmp(tmp3);
            dead_tmp(tmp4);
        }
        store_reg(s, UCOP_REG_LO, tmp);
        store_reg(s, UCOP_REG_HI, tmp2);
    } else {
        /* 32 bit mul */
        tmp = load_reg(s, UCOP_REG_M);
        tmp2 = load_reg(s, UCOP_REG_N);
        tcg_gen_mul_i32(tmp, tmp, tmp2);
        dead_tmp(tmp2);
        if (UCOP_SET(25)) {
            /* Add */
            tmp2 = load_reg(s, UCOP_REG_S);
            tcg_gen_add_i32(tmp, tmp, tmp2);
            dead_tmp(tmp2);
        }
        if (UCOP_SET_S) {
            gen_logic_CC(tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* miscellaneous instructions
 *
 * Decoded by full-pattern match: branch-exchange, PSR moves in both
 * directions (immediate or register source, B bit selecting BSR), and
 * count-leading-zeros/ones.  Anything unmatched is illegal.
 */
static void do_misc(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    TCGv tmp;

    if ((insn & 0xffffffe0) == 0x10ffc120) {
        /* Trivial implementation equivalent to bx. */
        tmp = load_reg(s, UCOP_REG_M);
        gen_bx(s, tmp);
        return;
    }

    if ((insn & 0xfbffc000) == 0x30ffc000) {
        /* PSR = immediate */
        val = UCOP_IMM_9;
        if (UCOP_SH_IM) {
            /* rotate right, same encoding as data-processing immediates */
            val = (val >> UCOP_SH_IM) | (val << (32 - UCOP_SH_IM));
        }
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            /* PSR write not permitted in this mode */
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x12ffc020) {
        /* PSR.flag = reg : update only the NZCV condition flags */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ASR_NZCV, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbffffe0) == 0x10ffc020) {
        /* PSR = reg */
        tmp = load_reg(s, UCOP_REG_M);
        if (gen_set_psr(s, ~ASR_RESERVED, UCOP_SET_B, tmp)) {
            ILLEGAL;
        }
        return;
    }

    if ((insn & 0xfbf83fff) == 0x10f80000) {
        /* reg = PSR : B selects the banked BSR (privileged) or the ASR */
        if (UCOP_SET_B) {
            if (IS_USER(s)) {
                ILLEGAL;
            }
            tmp = load_cpu_field(bsr);
        } else {
            tmp = new_tmp();
            gen_helper_asr_read(tmp, cpu_env);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    if ((insn & 0xfbf83fe0) == 0x12f80120) {
        /* clz: bit 26 selects count-leading-ones instead */
        tmp = load_reg(s, UCOP_REG_M);
        if (UCOP_SET(26)) {
            gen_helper_clo(tmp, tmp);
        } else {
            gen_helper_clz(tmp, tmp);
        }
        store_reg(s, UCOP_REG_D, tmp);
        return;
    }

    /* otherwise */
    ILLEGAL;
}
/* load/store I_offset and R_offset
 *
 * Word/byte load-store with immediate or register offset.  P selects
 * pre-indexing (offset applied before the access), W write-back; the
 * !P && W combination is the "user-mode access" form and forces the
 * user MMU index.  A load into r31 behaves as a branch.
 */
static void do_ldst_ir(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    unsigned int mmu_idx;
    TCGv tmp;
    TCGv tmp2;

    tmp2 = load_reg(s, UCOP_REG_N);
    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));

    /* immediate */
    if (UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
    }

    if (UCOP_SET_L) {
        /* load */
        if (UCOP_SET_B) {
            tmp = gen_ld8u(tmp2, mmu_idx);
        } else {
            tmp = gen_ld32(tmp2, mmu_idx);
        }
    } else {
        /* store */
        tmp = load_reg(s, UCOP_REG_D);
        if (UCOP_SET_B) {
            gen_st8(tmp, tmp2, mmu_idx);
        } else {
            gen_st32(tmp, tmp2, mmu_idx);
        }
    }

    /* Base write-back: post-indexed always writes back; pre-indexed only
       with W set.  Done before completing the load so an overlapping
       index register gets the loaded value last. */
    if (!UCOP_SET_P) {
        gen_add_data_offset(s, insn, tmp2);
        store_reg(s, UCOP_REG_N, tmp2);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, tmp2);
    } else {
        dead_tmp(tmp2);
    }

    if (UCOP_SET_L) {
        /* Complete the load. */
        if (UCOP_REG_D == 31) {
            gen_bx(s, tmp);
        } else {
            store_reg(s, UCOP_REG_D, tmp);
        }
    }
}
/* SWP instruction: atomically swap reg[M] with memory at reg[N],
 * word or byte (B bit), result into reg[D]. */
static void do_swap(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;
    TCGv tmp2;

    if ((insn & 0xff003fe0) != 0x40000120) {
        ILLEGAL;
    }

    /* ??? This is not really atomic. However we know
       we never have multiple CPUs running in parallel,
       so it is good enough. */
    addr = load_reg(s, UCOP_REG_N);
    tmp = load_reg(s, UCOP_REG_M);
    if (UCOP_SET_B) {
        tmp2 = gen_ld8u(addr, IS_USER(s));
        gen_st8(tmp, addr, IS_USER(s));
    } else {
        tmp2 = gen_ld32(addr, IS_USER(s));
        gen_st32(tmp, addr, IS_USER(s));
    }
    dead_tmp(addr);
    store_reg(s, UCOP_REG_D, tmp2);
}
/* load/store hw/sb
 *
 * Halfword and signed-byte load/store.  UCOP_SH_OP selects the access:
 * 0 is the SWP encoding (delegated to do_swap), 1 unsigned halfword,
 * 2 signed byte, 3 signed halfword.  Only halfword stores exist.
 * Indexing/write-back follows the same P/W scheme as do_ldst_ir.
 */
static void do_ldst_hwsb(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    TCGv addr;
    TCGv tmp;

    if (UCOP_SH_OP == 0) {
        do_swap(env, s, insn);
        return;
    }

    addr = load_reg(s, UCOP_REG_N);
    if (UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
    }

    if (UCOP_SET_L) { /* load */
        switch (UCOP_SH_OP) {
        case 1:
            tmp = gen_ld16u(addr, IS_USER(s));
            break;
        case 2:
            tmp = gen_ld8s(addr, IS_USER(s));
            break;
        default: /* see do_swap */
        case 3:
            tmp = gen_ld16s(addr, IS_USER(s));
            break;
        }
    } else { /* store */
        if (UCOP_SH_OP != 1) {
            ILLEGAL;
        }
        tmp = load_reg(s, UCOP_REG_D);
        gen_st16(tmp, addr, IS_USER(s));
    }

    /* Perform base writeback before the loaded value to
       ensure correct behavior with overlapping index registers. */
    if (!UCOP_SET_P) {
        gen_add_datah_offset(s, insn, addr);
        store_reg(s, UCOP_REG_N, addr);
    } else if (UCOP_SET_W) {
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }

    if (UCOP_SET_L) {
        /* Complete the load. */
        store_reg(s, UCOP_REG_D, tmp);
    }
}
/* load/store multiple words
 *
 * The register list occupies insn bits 0-5 and 9-18 (16 bits total);
 * bit 6 selects the upper register bank, so list bit k maps to register
 * (bit6 ? 16 : 0) + k.  P/U select pre/post increment/decrement, W is
 * base write-back.  The B (S) bit is privileged: with the PC bit (18)
 * clear it transfers the user-mode register bank, otherwise a load that
 * includes the PC also restores ASR from BSR at the end.
 */
static void do_ldst_m(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val, i, mmu_idx;
    int j, n, reg, user, loaded_base;
    TCGv tmp;
    TCGv tmp2;
    TCGv addr;
    TCGv loaded_var;

    if (UCOP_SET(7)) {
        ILLEGAL;
    }
    /* XXX: store correct base if write back */
    user = 0;
    if (UCOP_SET_B) { /* S bit in instruction table */
        if (IS_USER(s)) {
            ILLEGAL; /* only usable in supervisor mode */
        }
        if (UCOP_SET(18) == 0) { /* pc reg */
            /* no PC in the list: access the user-mode register bank */
            user = 1;
        }
    }

    mmu_idx = (IS_USER(s) || (!UCOP_SET_P && UCOP_SET_W));
    addr = load_reg(s, UCOP_REG_N);

    /* compute total size */
    loaded_base = 0;
    TCGV_UNUSED(loaded_var);
    n = 0;
    for (i = 0; i < 6; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }
    for (i = 9; i < 19; i++) {
        if (UCOP_SET(i)) {
            n++;
        }
    }

    /* XXX: test invalid n == 0 case ? */
    /* Point addr at the lowest address that will be transferred. */
    if (UCOP_SET_U) {
        if (UCOP_SET_P) {
            /* pre increment */
            tcg_gen_addi_i32(addr, addr, 4);
        } else {
            /* post increment */
        }
    } else {
        if (UCOP_SET_P) {
            /* pre decrement */
            tcg_gen_addi_i32(addr, addr, -(n * 4));
        } else {
            /* post decrement */
            if (n != 1) {
                tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
            }
        }
    }

    j = 0;
    reg = UCOP_SET(6) ? 16 : 0;
    for (i = 0; i < 19; i++, reg++) {
        if (i == 6) {
            /* skip the unused bits 6-8; reg keeps counting contiguously */
            i = i + 3;
        }
        if (UCOP_SET(i)) {
            if (UCOP_SET_L) { /* load */
                tmp = gen_ld32(addr, mmu_idx);
                if (reg == 31) {
                    /* loading the PC is a branch */
                    gen_bx(s, tmp);
                } else if (user) {
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_set_user_reg(cpu_env, tmp2, tmp);
                    tcg_temp_free_i32(tmp2);
                    dead_tmp(tmp);
                } else if (reg == UCOP_REG_N) {
                    /* defer overwriting the base until after write-back */
                    loaded_var = tmp;
                    loaded_base = 1;
                } else {
                    store_reg(s, reg, tmp);
                }
            } else { /* store */
                if (reg == 31) {
                    /* special case: r31 = PC + 4 */
                    val = (long)s->pc;
                    tmp = new_tmp();
                    tcg_gen_movi_i32(tmp, val);
                } else if (user) {
                    tmp = new_tmp();
                    tmp2 = tcg_const_i32(reg);
                    gen_helper_get_user_reg(tmp, cpu_env, tmp2);
                    tcg_temp_free_i32(tmp2);
                } else {
                    tmp = load_reg(s, reg);
                }
                gen_st32(tmp, addr, mmu_idx);
            }
            j++;
            /* no need to add after the last transfer */
            if (j != n) {
                tcg_gen_addi_i32(addr, addr, 4);
            }
        }
    }

    if (UCOP_SET_W) { /* write back */
        /* undo/extend the address adjustment so addr holds the final base */
        if (UCOP_SET_U) {
            if (UCOP_SET_P) {
                /* pre increment */
            } else {
                /* post increment */
                tcg_gen_addi_i32(addr, addr, 4);
            }
        } else {
            if (UCOP_SET_P) {
                /* pre decrement */
                if (n != 1) {
                    tcg_gen_addi_i32(addr, addr, -((n - 1) * 4));
                }
            } else {
                /* post decrement */
                tcg_gen_addi_i32(addr, addr, -(n * 4));
            }
        }
        store_reg(s, UCOP_REG_N, addr);
    } else {
        dead_tmp(addr);
    }

    if (loaded_base) {
        store_reg(s, UCOP_REG_N, loaded_var);
    }

    if (UCOP_SET_B && !user) {
        /* Restore ASR from BSR. */
        tmp = load_cpu_field(bsr);
        gen_set_asr(tmp, 0xffffffff);
        dead_tmp(tmp);
        s->is_jmp = DISAS_UPDATE;
    }
}
/* branch (and link)
 *
 * Conditional PC-relative branch: a 24-bit signed word offset relative to
 * PC + 4.  With L set, the return address (address of the next insn) is
 * stored in r30 first.  Condition 0xf is illegal; 0xe means "always".
 */
static void do_branch(CPUUniCore32State *env, DisasContext *s, uint32_t insn)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int val;
    int32_t offset;
    TCGv tmp;

    if (UCOP_COND == 0xf) {
        ILLEGAL;
    }

    if (UCOP_COND != 0xe) {
        /* if not always execute, we generate a conditional jump to
           next instruction */
        s->condlabel = gen_new_label();
        gen_test_cc(UCOP_COND ^ 1, s->condlabel);
        s->condjmp = 1;
    }

    /* s->pc already points past this instruction */
    val = (int32_t)s->pc;
    if (UCOP_SET_L) {
        /* link: save the return address in r30 */
        tmp = new_tmp();
        tcg_gen_movi_i32(tmp, val);
        store_reg(s, 30, tmp);
    }

    /* sign-extend the 24-bit offset and scale to words */
    offset = (((int32_t)insn << 8) >> 8);
    val += (offset << 2); /* unicore is pc+4 */
    gen_jmp(s, val);
}
/* Fetch and translate one UniCore32 instruction at s->pc.
 *
 * Top-level decode on insn[31:29]; several cases deliberately fall
 * through to the next case when their extra decode bits do not match.
 */
static void disas_uc32_insn(CPUUniCore32State *env, DisasContext *s)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    unsigned int insn;

    insn = cpu_ldl_code(env, s->pc);
    s->pc += 4;

    /* UniCore instructions class:
     * AAAB BBBC xxxx xxxx xxxx xxxD xxEx xxxx
     * AAA  : see switch case
     * BBBB : opcodes or cond or PUBW
     * C    : S OR L
     * D    : 8
     * E    : 5
     */
    switch (insn >> 29) {
    case 0x0:
        if (UCOP_SET(5) && UCOP_SET(8) && !UCOP_SET(28)) {
            do_mult(env, s, insn);
            break;
        }
        if (UCOP_SET(8)) {
            do_misc(env, s, insn);
            break;
        }
        /* fall through: data processing with register operand */
    case 0x1:
        if (((UCOP_OPCODES >> 2) == 2) && !UCOP_SET_S) {
            do_misc(env, s, insn);
            break;
        }
        do_datap(env, s, insn);
        break;
    case 0x2:
        if (UCOP_SET(8) && UCOP_SET(5)) {
            do_ldst_hwsb(env, s, insn);
            break;
        }
        if (UCOP_SET(8) || UCOP_SET(5)) {
            ILLEGAL;
        }
        /* fall through: word/byte load-store */
    case 0x3:
        do_ldst_ir(env, s, insn);
        break;
    case 0x4:
        if (UCOP_SET(8)) {
            ILLEGAL; /* extended instructions */
        }
        do_ldst_m(env, s, insn);
        break;
    case 0x5:
        do_branch(env, s, insn);
        break;
    case 0x6:
        /* Coprocessor. */
        disas_coproc_insn(env, s, insn);
        break;
    case 0x7:
        if (!UCOP_SET(28)) {
            disas_coproc_insn(env, s, insn);
            break;
        }
        if ((insn & 0xff000000) == 0xff000000) { /* syscall */
            gen_set_pc_im(s->pc);
            s->is_jmp = DISAS_SYSCALL;
            break;
        }
        ILLEGAL;
    }
}
/* generate intermediate code in gen_opc_buf and gen_opparam_buf for
   basic block 'tb'. */
void gen_intermediate_code(CPUUniCore32State *env, TranslationBlock *tb)
{
    UniCore32CPU *cpu = uc32_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext dc1, *dc = &dc1;
    target_ulong pc_start;
    uint32_t next_page_start;
    int num_insns;
    int max_insns;

    /* generate intermediate code */
    num_temps = 0;

    pc_start = tb->pc;

    dc->tb = tb;

    dc->is_jmp = DISAS_NEXT;
    dc->pc = pc_start;
    dc->singlestep_enabled = cs->singlestep_enabled;
    dc->condjmp = 0;
    /* Shared scratch temporaries used by the UCF64 translation code. */
    cpu_F0s = tcg_temp_new_i32();
    cpu_F1s = tcg_temp_new_i32();
    cpu_F0d = tcg_temp_new_i64();
    cpu_F1d = tcg_temp_new_i64();
    next_page_start = (pc_start & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;

    num_insns = 0;
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

#ifndef CONFIG_USER_ONLY
    if ((env->uncached_asr & ASR_M) == ASR_MODE_USER) {
        dc->user = 1;
    } else {
        dc->user = 0;
    }
#endif

    gen_tb_start(tb);
    do {
        tcg_gen_insn_start(dc->pc);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, dc->pc, BP_ANY))) {
            gen_set_pc_im(dc->pc);
            gen_exception(EXCP_DEBUG);
            dc->is_jmp = DISAS_JUMP;
            /* Advance PC so that clearing the breakpoint will
               invalidate this TB. */
            dc->pc += 2; /* FIXME */
            goto done_generating;
        }

        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        disas_uc32_insn(env, dc);

        /* The translator is expected to free every temp it allocates;
           a nonzero count here indicates a TCG temp leak. */
        if (num_temps) {
            fprintf(stderr, "Internal resource leak before %08x\n", dc->pc);
            num_temps = 0;
        }

        if (dc->condjmp && !dc->is_jmp) {
            gen_set_label(dc->condlabel);
            dc->condjmp = 0;
        }
        /* Translation stops when a conditional branch is encountered.
         * Otherwise the subsequent code could get translated several times.
         * Also stop translation when a page boundary is reached.  This
         * ensures prefetch aborts occur at the right place.  */
    } while (!dc->is_jmp && !tcg_op_buf_full() &&
             !cs->singlestep_enabled &&
             !singlestep &&
             dc->pc < next_page_start &&
             num_insns < max_insns);

    if (tb->cflags & CF_LAST_IO) {
        if (dc->condjmp) {
            /* FIXME: This can theoretically happen with self-modifying
               code. */
            cpu_abort(cs, "IO on conditional branch instruction");
        }
        gen_io_end();
    }

    /* At this stage dc->condjmp will only be set when the skipped
       instruction was a conditional branch or trap, and the PC has
       already been written. */
    if (unlikely(cs->singlestep_enabled)) {
        /* Make sure the pc is updated, and raise a debug exception. */
        if (dc->condjmp) {
            if (dc->is_jmp == DISAS_SYSCALL) {
                gen_exception(UC32_EXCP_PRIV);
            } else {
                gen_exception(EXCP_DEBUG);
            }
            gen_set_label(dc->condlabel);
        }
        if (dc->condjmp || !dc->is_jmp) {
            gen_set_pc_im(dc->pc);
            dc->condjmp = 0;
        }
        if (dc->is_jmp == DISAS_SYSCALL && !dc->condjmp) {
            gen_exception(UC32_EXCP_PRIV);
        } else {
            gen_exception(EXCP_DEBUG);
        }
    } else {
        /* While branches must always occur at the end of an IT block,
           there are a few other things that can cause us to terminate
           the TB in the middel of an IT block:
            - Exception generating instructions (bkpt, swi, undefined).
            - Page boundaries.
            - Hardware watchpoints.
           Hardware breakpoints have already been handled and skip this code.
         */
        switch (dc->is_jmp) {
        case DISAS_NEXT:
            gen_goto_tb(dc, 1, dc->pc);
            break;
        default:
        case DISAS_JUMP:
        case DISAS_UPDATE:
            /* indicate that the hash table must be used to find the next TB */
            tcg_gen_exit_tb(0);
            break;
        case DISAS_TB_JUMP:
            /* nothing more to generate */
            break;
        case DISAS_SYSCALL:
            gen_exception(UC32_EXCP_PRIV);
            break;
        }
        if (dc->condjmp) {
            /* not-taken path of a trailing conditional instruction */
            gen_set_label(dc->condlabel);
            gen_goto_tb(dc, 1, dc->pc);
            dc->condjmp = 0;
        }
    }

done_generating:
    gen_tb_end(tb, num_insns);

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
        qemu_log("----------------\n");
        qemu_log("IN: %s\n", lookup_symbol(pc_start));
        log_target_disas(cs, pc_start, dc->pc - pc_start, 0);
        qemu_log("\n");
    }
#endif
    tb->size = dc->pc - pc_start;
    tb->icount = num_insns;
}
/* Processor mode names indexed by the low 4 bits of the PSR,
   used by uc32_cpu_dump_state(). */
static const char *cpu_mode_names[16] = {
    "USER", "REAL", "INTR", "PRIV", "UM14", "UM15", "UM16", "TRAP",
    "UM18", "UM19", "UM1A", "EXTN", "UM1C", "UM1D", "UM1E", "SUSR"
};
/* Define UCF64_DUMP_STATE to include the FPU registers in register dumps;
   disabled by default, in which case the dump call compiles to nothing. */
#undef UCF64_DUMP_STATE
#ifdef UCF64_DUMP_STATE
static void cpu_dump_state_ucf64(CPUUniCore32State *env, FILE *f,
        fprintf_function cpu_fprintf, int flags)
{
    int i;
    union {
        uint32_t i;
        float s;
    } s0, s1;
    CPU_DoubleU d;
    /* ??? This assumes float64 and double have the same layout.
       Oh well, it's only debug dumps.  */
    union {
        float64 f64;
        double d;
    } d0;

    for (i = 0; i < 16; i++) {
        d.d = env->ucf64.regs[i];
        s0.i = d.l.lower;
        s1.i = d.l.upper;
        d0.f64 = d.d;
        /* each 64-bit register doubles as a pair of singles */
        cpu_fprintf(f, "s%02d=%08x(%8g) s%02d=%08x(%8g)",
                    i * 2, (int)s0.i, s0.s,
                    i * 2 + 1, (int)s1.i, s1.s);
        cpu_fprintf(f, " d%02d=%" PRIx64 "(%8g)\n",
                    i, (uint64_t)d0.f64, d0.d);
    }
    cpu_fprintf(f, "FPSCR: %08x\n", (int)env->ucf64.xregs[UC32_UCF64_FPSCR]);
}
#else
#define cpu_dump_state_ucf64(env, file, pr, flags) do { } while (0)
#endif
  1907. void uc32_cpu_dump_state(CPUState *cs, FILE *f,
  1908. fprintf_function cpu_fprintf, int flags)
  1909. {
  1910. UniCore32CPU *cpu = UNICORE32_CPU(cs);
  1911. CPUUniCore32State *env = &cpu->env;
  1912. int i;
  1913. uint32_t psr;
  1914. for (i = 0; i < 32; i++) {
  1915. cpu_fprintf(f, "R%02d=%08x", i, env->regs[i]);
  1916. if ((i % 4) == 3) {
  1917. cpu_fprintf(f, "\n");
  1918. } else {
  1919. cpu_fprintf(f, " ");
  1920. }
  1921. }
  1922. psr = cpu_asr_read(env);
  1923. cpu_fprintf(f, "PSR=%08x %c%c%c%c %s\n",
  1924. psr,
  1925. psr & (1 << 31) ? 'N' : '-',
  1926. psr & (1 << 30) ? 'Z' : '-',
  1927. psr & (1 << 29) ? 'C' : '-',
  1928. psr & (1 << 28) ? 'V' : '-',
  1929. cpu_mode_names[psr & 0xf]);
  1930. cpu_dump_state_ucf64(env, f, cpu_fprintf, flags);
  1931. }
/* Restore CPU state from the data recorded at translation time.  The only
 * value recorded per insn is the PC (see tcg_gen_insn_start in
 * gen_intermediate_code), and on UniCore32 the PC is register r31. */
void restore_state_to_opc(CPUUniCore32State *env, TranslationBlock *tb,
                          target_ulong *data)
{
    env->regs[31] = data[0];
}