/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "qemu-common.h"

#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__)
#define WORDS_ALIGNED
#endif

/* some important defines:
 *
 * WORDS_ALIGNED : if defined, the host cpu can only make word aligned
 * memory accesses.
 *
 * WORDS_BIGENDIAN : if defined, the host cpu is big endian and
 * otherwise little endian.
 *
 * (TARGET_WORDS_ALIGNED : same for target cpu (not supported yet))
 *
 * TARGET_WORDS_BIGENDIAN : same for target cpu
 */

#include "bswap.h"
#include "softfloat.h"

#if defined(WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
#define BSWAP_NEEDED
#endif

#ifdef BSWAP_NEEDED

static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}

static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}

static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}

static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}

static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}

static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}

#else

static inline uint16_t tswap16(uint16_t s)
{
    return s;
}

static inline uint32_t tswap32(uint32_t s)
{
    return s;
}

static inline uint64_t tswap64(uint64_t s)
{
    return s;
}

static inline void tswap16s(uint16_t *s)
{
}

static inline void tswap32s(uint32_t *s)
{
}

static inline void tswap64s(uint64_t *s)
{
}

#endif

#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
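
/* Illustrative usage sketch (not part of the original header): tswap32
 * converts a value between target and host byte order.  When host and
 * target endianness differ (BSWAP_NEEDED), the bytes are swapped;
 * otherwise the value passes through unchanged.  The variable names
 * below are hypothetical.
 *
 *     uint32_t target_val = 0x12345678;        // value as the target sees it
 *     uint32_t host_val = tswap32(target_val); // swapped only if BSWAP_NEEDED
 *     tswap32s(&host_val);                     // in-place variant, swaps back
 */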

typedef union {
    float32 f;
    uint32_t l;
} CPU_FloatU;

/* NOTE: arm FPA is horrible as double 32 bit words are stored in big
   endian ! */
typedef union {
    float64 d;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upper;
        uint32_t lower;
    } l;
#else
    struct {
        uint32_t lower;
        uint32_t upper;
    } l;
#endif
    uint64_t ll;
} CPU_DoubleU;

#ifdef TARGET_SPARC
typedef union {
    float128 q;
#if defined(WORDS_BIGENDIAN) \
    || (defined(__arm__) && !defined(__VFP_FP__) && !defined(CONFIG_SOFTFLOAT))
    struct {
        uint32_t upmost;
        uint32_t upper;
        uint32_t lower;
        uint32_t lowest;
    } l;
    struct {
        uint64_t upper;
        uint64_t lower;
    } ll;
#else
    struct {
        uint32_t lowest;
        uint32_t lower;
        uint32_t upper;
        uint32_t upmost;
    } l;
    struct {
        uint64_t lower;
        uint64_t upper;
    } ll;
#endif
} CPU_QuadU;
#endif

/* CPU memory access without any memory or io remapping */

/*
 * the generic syntax for the memory accesses is:
 *
 * load: ld{type}{sign}{size}{endian}_{access_type}(ptr)
 *
 * store: st{type}{size}{endian}_{access_type}(ptr, val)
 *
 * type is:
 * (empty): integer access
 *   f    : float access
 *
 * sign is:
 * (empty): for floats or 32 bit size
 *   u    : unsigned
 *   s    : signed
 *
 * size is:
 *   b: 8 bits
 *   w: 16 bits
 *   l: 32 bits
 *   q: 64 bits
 *
 * endian is:
 * (empty): target cpu endianness or 8 bit access
 *   r    : reversed target cpu endianness (not implemented yet)
 *   be   : big endian (not implemented yet)
 *   le   : little endian (not implemented yet)
 *
 * access_type is:
 *   raw    : host memory access
 *   user   : user mode access using soft MMU
 *   kernel : kernel mode access using soft MMU
 */
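
/* Illustrative decoding of the naming scheme above (not part of the original
 * header); "buf" is a hypothetical host buffer.  The *_p primitives defined
 * below take a plain host pointer; the *_raw wrappers later in this header
 * are built from them.
 *
 *     uint8_t buf[8];
 *     int      a = ldsw_le_p(buf);   // load, integer, signed, 16 bit, little endian
 *     uint64_t b = ldq_be_p(buf);    // load, integer, 64 bit, big endian
 *     stl_p(buf, 0x12345678);        // store, integer, 32 bit, target cpu endianness
 */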

static inline int ldub_p(const void *ptr)
{
    return *(uint8_t *)ptr;
}

static inline int ldsb_p(const void *ptr)
{
    return *(int8_t *)ptr;
}

static inline void stb_p(void *ptr, int v)
{
    *(uint8_t *)ptr = v;
}

/* NOTE: on arm, putting 2 in /proc/sys/debug/alignment so that the
   kernel handles unaligned load/stores may give better results, but
   it is a system wide setting : bad */
#if defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

/* conservative code for little endian unaligned accesses */
static inline int lduw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8);
#endif
}

static inline int ldsw_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return (int16_t)val;
#else
    const uint8_t *p = ptr;
    return (int16_t)(p[0] | (p[1] << 8));
#endif
}

static inline int ldl_le_p(const void *ptr)
{
#ifdef _ARCH_PPC
    int val;
    __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (ptr));
    return val;
#else
    const uint8_t *p = ptr;
    return p[0] | (p[1] << 8) | (p[2] << 16) | (p[3] << 24);
#endif
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    const uint8_t *p = ptr;
    uint32_t v1, v2;
    v1 = ldl_le_p(p);
    v2 = ldl_le_p(p + 4);
    return v1 | ((uint64_t)v2 << 32);
}

static inline void stw_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*(uint16_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
#endif
}

static inline void stl_le_p(void *ptr, int v)
{
#ifdef _ARCH_PPC
    __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*(uint32_t *)ptr) : "r" (v), "r" (ptr));
#else
    uint8_t *p = ptr;
    p[0] = v;
    p[1] = v >> 8;
    p[2] = v >> 16;
    p[3] = v >> 24;
#endif
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    uint8_t *p = ptr;
    stl_le_p(p, (uint32_t)v);
    stl_le_p(p + 4, v >> 32);
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_le_p(ptr);
    return u.f;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_le_p(ptr, u.i);
}

static inline float64 ldfq_le_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.lower = ldl_le_p(ptr);
    u.l.upper = ldl_le_p(ptr + 4);
    return u.d;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_le_p(ptr, u.l.lower);
    stl_le_p(ptr + 4, u.l.upper);
}

#else

static inline int lduw_le_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_le_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_le_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_le_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_le_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_le_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_le_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_le_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_le_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_le_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_le_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

#if !defined(WORDS_BIGENDIAN) || defined(WORDS_ALIGNED)

static inline int lduw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return ((b[0] << 8) | b[1]);
#endif
}

static inline int ldsw_be_p(const void *ptr)
{
#if defined(__i386__)
    int val;
    asm volatile ("movzwl %1, %0\n"
                  "xchgb %b0, %h0\n"
                  : "=q" (val)
                  : "m" (*(uint16_t *)ptr));
    return (int16_t)val;
#else
    const uint8_t *b = ptr;
    return (int16_t)((b[0] << 8) | b[1]);
#endif
}

static inline int ldl_be_p(const void *ptr)
{
#if defined(__i386__) || defined(__x86_64__)
    int val;
    asm volatile ("movl %1, %0\n"
                  "bswap %0\n"
                  : "=r" (val)
                  : "m" (*(uint32_t *)ptr));
    return val;
#else
    const uint8_t *b = ptr;
    return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3];
#endif
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    uint32_t a, b;
    a = ldl_be_p(ptr);
    b = ldl_be_p((uint8_t *)ptr + 4);
    return (((uint64_t)a << 32) | b);
}

static inline void stw_be_p(void *ptr, int v)
{
#if defined(__i386__)
    asm volatile ("xchgb %b0, %h0\n"
                  "movw %w0, %1\n"
                  : "=q" (v)
                  : "m" (*(uint16_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 8;
    d[1] = v;
#endif
}

static inline void stl_be_p(void *ptr, int v)
{
#if defined(__i386__) || defined(__x86_64__)
    asm volatile ("bswap %0\n"
                  "movl %0, %1\n"
                  : "=r" (v)
                  : "m" (*(uint32_t *)ptr), "0" (v));
#else
    uint8_t *d = (uint8_t *) ptr;
    d[0] = v >> 24;
    d[1] = v >> 16;
    d[2] = v >> 8;
    d[3] = v;
#endif
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    stl_be_p(ptr, v >> 32);
    stl_be_p((uint8_t *)ptr + 4, v);
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.i = ldl_be_p(ptr);
    return u.f;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    union {
        float32 f;
        uint32_t i;
    } u;
    u.f = v;
    stl_be_p(ptr, u.i);
}

static inline float64 ldfq_be_p(const void *ptr)
{
    CPU_DoubleU u;
    u.l.upper = ldl_be_p(ptr);
    u.l.lower = ldl_be_p((uint8_t *)ptr + 4);
    return u.d;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    CPU_DoubleU u;
    u.d = v;
    stl_be_p(ptr, u.l.upper);
    stl_be_p((uint8_t *)ptr + 4, u.l.lower);
}

#else

static inline int lduw_be_p(const void *ptr)
{
    return *(uint16_t *)ptr;
}

static inline int ldsw_be_p(const void *ptr)
{
    return *(int16_t *)ptr;
}

static inline int ldl_be_p(const void *ptr)
{
    return *(uint32_t *)ptr;
}

static inline uint64_t ldq_be_p(const void *ptr)
{
    return *(uint64_t *)ptr;
}

static inline void stw_be_p(void *ptr, int v)
{
    *(uint16_t *)ptr = v;
}

static inline void stl_be_p(void *ptr, int v)
{
    *(uint32_t *)ptr = v;
}

static inline void stq_be_p(void *ptr, uint64_t v)
{
    *(uint64_t *)ptr = v;
}

/* float access */

static inline float32 ldfl_be_p(const void *ptr)
{
    return *(float32 *)ptr;
}

static inline float64 ldfq_be_p(const void *ptr)
{
    return *(float64 *)ptr;
}

static inline void stfl_be_p(void *ptr, float32 v)
{
    *(float32 *)ptr = v;
}

static inline void stfq_be_p(void *ptr, float64 v)
{
    *(float64 *)ptr = v;
}
#endif

/* target CPU memory access functions */
#if defined(TARGET_WORDS_BIGENDIAN)
#define lduw_p(p) lduw_be_p(p)
#define ldsw_p(p) ldsw_be_p(p)
#define ldl_p(p) ldl_be_p(p)
#define ldq_p(p) ldq_be_p(p)
#define ldfl_p(p) ldfl_be_p(p)
#define ldfq_p(p) ldfq_be_p(p)
#define stw_p(p, v) stw_be_p(p, v)
#define stl_p(p, v) stl_be_p(p, v)
#define stq_p(p, v) stq_be_p(p, v)
#define stfl_p(p, v) stfl_be_p(p, v)
#define stfq_p(p, v) stfq_be_p(p, v)
#else
#define lduw_p(p) lduw_le_p(p)
#define ldsw_p(p) ldsw_le_p(p)
#define ldl_p(p) ldl_le_p(p)
#define ldq_p(p) ldq_le_p(p)
#define ldfl_p(p) ldfl_le_p(p)
#define ldfq_p(p) ldfq_le_p(p)
#define stw_p(p, v) stw_le_p(p, v)
#define stl_p(p, v) stl_le_p(p, v)
#define stq_p(p, v) stq_le_p(p, v)
#define stfl_p(p, v) stfl_le_p(p, v)
#define stfq_p(p, v) stfq_le_p(p, v)
#endif

/* MMU memory access macros */

#if defined(CONFIG_USER_ONLY)
#include <assert.h>
#include "qemu-types.h"

/* On some host systems the guest address space is reserved on the host.
 * This allows the guest address space to be offset to a convenient location.
 */
//#define GUEST_BASE 0x20000000
#define GUEST_BASE 0

/* All direct uses of g2h and h2g need to go away for usermode softmmu.  */
#define g2h(x) ((void *)((unsigned long)(x) + GUEST_BASE))
#define h2g(x) ({ \
    unsigned long __ret = (unsigned long)(x) - GUEST_BASE; \
    /* Check if given address fits target address space */ \
    assert(__ret == (abi_ulong)__ret); \
    (abi_ulong)__ret; \
})
#define h2g_valid(x) ({ \
    unsigned long __guest = (unsigned long)(x) - GUEST_BASE; \
    (__guest == (abi_ulong)__guest); \
})

#define saddr(x) g2h(x)
#define laddr(x) g2h(x)
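
/* Illustrative sketch (not part of the original header): in user mode a guest
 * virtual address becomes a host pointer by adding GUEST_BASE, and the reverse
 * mapping asserts that the result still fits in abi_ulong.  The names below
 * are hypothetical.
 *
 *     abi_ulong guest_addr = 0x10000;
 *     void *host_ptr = g2h(guest_addr);     // host pointer usable with ld*_p/st*_p
 *     abi_ulong back = h2g(host_ptr);       // == guest_addr, asserts on overflow
 *     if (h2g_valid(host_ptr)) { ... }      // non-asserting range check
 */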

#else /* !CONFIG_USER_ONLY */
/* NOTE: we use double casts if pointers and target_ulong have
   different sizes */
#define saddr(x) (uint8_t *)(long)(x)
#define laddr(x) (uint8_t *)(long)(x)
#endif

#define ldub_raw(p) ldub_p(laddr((p)))
#define ldsb_raw(p) ldsb_p(laddr((p)))
#define lduw_raw(p) lduw_p(laddr((p)))
#define ldsw_raw(p) ldsw_p(laddr((p)))
#define ldl_raw(p) ldl_p(laddr((p)))
#define ldq_raw(p) ldq_p(laddr((p)))
#define ldfl_raw(p) ldfl_p(laddr((p)))
#define ldfq_raw(p) ldfq_p(laddr((p)))
#define stb_raw(p, v) stb_p(saddr((p)), v)
#define stw_raw(p, v) stw_p(saddr((p)), v)
#define stl_raw(p, v) stl_p(saddr((p)), v)
#define stq_raw(p, v) stq_p(saddr((p)), v)
#define stfl_raw(p, v) stfl_p(saddr((p)), v)
#define stfq_raw(p, v) stfq_p(saddr((p)), v)

#if defined(CONFIG_USER_ONLY)

/* if user mode, no other memory access functions */
#define ldub(p) ldub_raw(p)
#define ldsb(p) ldsb_raw(p)
#define lduw(p) lduw_raw(p)
#define ldsw(p) ldsw_raw(p)
#define ldl(p) ldl_raw(p)
#define ldq(p) ldq_raw(p)
#define ldfl(p) ldfl_raw(p)
#define ldfq(p) ldfq_raw(p)
#define stb(p, v) stb_raw(p, v)
#define stw(p, v) stw_raw(p, v)
#define stl(p, v) stl_raw(p, v)
#define stq(p, v) stq_raw(p, v)
#define stfl(p, v) stfl_raw(p, v)
#define stfq(p, v) stfq_raw(p, v)
#define ldub_code(p) ldub_raw(p)
#define ldsb_code(p) ldsb_raw(p)
#define lduw_code(p) lduw_raw(p)
#define ldsw_code(p) ldsw_raw(p)
#define ldl_code(p) ldl_raw(p)
#define ldq_code(p) ldq_raw(p)
#define ldub_kernel(p) ldub_raw(p)
#define ldsb_kernel(p) ldsb_raw(p)
#define lduw_kernel(p) lduw_raw(p)
#define ldsw_kernel(p) ldsw_raw(p)
#define ldl_kernel(p) ldl_raw(p)
#define ldq_kernel(p) ldq_raw(p)
#define ldfl_kernel(p) ldfl_raw(p)
#define ldfq_kernel(p) ldfq_raw(p)
#define stb_kernel(p, v) stb_raw(p, v)
#define stw_kernel(p, v) stw_raw(p, v)
#define stl_kernel(p, v) stl_raw(p, v)
#define stq_kernel(p, v) stq_raw(p, v)
#define stfl_kernel(p, v) stfl_raw(p, v)
#define stfq_kernel(p, v) stfq_raw(p, v)
#endif /* defined(CONFIG_USER_ONLY) */

/* page related stuff */

#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
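
/* Illustrative arithmetic (not part of the original header): with a
 * hypothetical TARGET_PAGE_BITS of 12, TARGET_PAGE_SIZE is 0x1000,
 * TARGET_PAGE_MASK is ~0xfff, and
 *
 *     TARGET_PAGE_ALIGN(0x12345) == 0x13000    // rounded up to the next page
 *     (0x12345 & TARGET_PAGE_MASK) == 0x12000  // rounded down to the page start
 */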

/* ??? These should be the larger of unsigned long and target_ulong.  */
extern unsigned long qemu_real_host_page_size;
extern unsigned long qemu_host_page_bits;
extern unsigned long qemu_host_page_size;
extern unsigned long qemu_host_page_mask;

#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)

/* same as PROT_xxx */
#define PAGE_READ  0x0001
#define PAGE_WRITE 0x0002
#define PAGE_EXEC  0x0004
#define PAGE_BITS  (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
#define PAGE_VALID 0x0008
/* original state of the write flag (used when tracking self-modifying
   code) */
#define PAGE_WRITE_ORG 0x0010
#define PAGE_RESERVED  0x0020

void page_dump(FILE *f);
int page_get_flags(target_ulong address);
void page_set_flags(target_ulong start, target_ulong end, int flags);
int page_check_range(target_ulong start, target_ulong len, int flags);

void cpu_exec_init_all(unsigned long tb_size);
CPUState *cpu_copy(CPUState *env);

void cpu_dump_state(CPUState *env, FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                    int flags);
void cpu_dump_statistics(CPUState *env, FILE *f,
                         int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
                         int flags);

void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
    __attribute__ ((__format__ (__printf__, 2, 3)));

extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
extern int64_t qemu_icount;
extern int use_icount;

#define CPU_INTERRUPT_EXIT   0x01  /* wants exit from main loop */
#define CPU_INTERRUPT_HARD   0x02  /* hardware interrupt pending */
#define CPU_INTERRUPT_EXITTB 0x04  /* exit the current TB (use for x86 a20 case) */
#define CPU_INTERRUPT_TIMER  0x08  /* internal timer exception pending */
#define CPU_INTERRUPT_FIQ    0x10  /* Fast interrupt pending.  */
#define CPU_INTERRUPT_HALT   0x20  /* CPU halt wanted */
#define CPU_INTERRUPT_SMI    0x40  /* (x86 only) SMI interrupt pending */
#define CPU_INTERRUPT_DEBUG  0x80  /* Debug event occurred.  */
#define CPU_INTERRUPT_VIRQ   0x100 /* virtual interrupt pending.  */
#define CPU_INTERRUPT_NMI    0x200 /* NMI pending.  */

void cpu_interrupt(CPUState *s, int mask);
void cpu_reset_interrupt(CPUState *env, int mask);

/* Breakpoint/watchpoint flags */
#define BP_MEM_READ           0x01
#define BP_MEM_WRITE          0x02
#define BP_MEM_ACCESS         (BP_MEM_READ | BP_MEM_WRITE)
#define BP_STOP_BEFORE_ACCESS 0x04
#define BP_WATCHPOINT_HIT     0x08
#define BP_GDB                0x10
#define BP_CPU                0x20

int cpu_breakpoint_insert(CPUState *env, target_ulong pc, int flags,
                          CPUBreakpoint **breakpoint);
int cpu_breakpoint_remove(CPUState *env, target_ulong pc, int flags);
void cpu_breakpoint_remove_by_ref(CPUState *env, CPUBreakpoint *breakpoint);
void cpu_breakpoint_remove_all(CPUState *env, int mask);
int cpu_watchpoint_insert(CPUState *env, target_ulong addr, target_ulong len,
                          int flags, CPUWatchpoint **watchpoint);
int cpu_watchpoint_remove(CPUState *env, target_ulong addr,
                          target_ulong len, int flags);
void cpu_watchpoint_remove_by_ref(CPUState *env, CPUWatchpoint *watchpoint);
void cpu_watchpoint_remove_all(CPUState *env, int mask);
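
/* Illustrative sketch (not part of the original header): inserting a GDB
 * breakpoint at a guest pc and a write watchpoint over a hypothetical 4-byte
 * variable; "env", "pc" and "addr" are assumed to come from the caller.
 *
 *     CPUBreakpoint *bp;
 *     CPUWatchpoint *wp;
 *     cpu_breakpoint_insert(env, pc, BP_GDB, &bp);
 *     cpu_watchpoint_insert(env, addr, 4, BP_MEM_WRITE | BP_GDB, &wp);
 *     ...
 *     cpu_breakpoint_remove_all(env, BP_GDB);   // drop everything owned by gdb
 *     cpu_watchpoint_remove_all(env, BP_GDB);
 */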

#define SSTEP_ENABLE  0x1  /* Enable simulated HW single stepping */
#define SSTEP_NOIRQ   0x2  /* Do not use IRQ while single stepping */
#define SSTEP_NOTIMER 0x4  /* Do not use timers while single stepping */

void cpu_single_step(CPUState *env, int enabled);
void cpu_reset(CPUState *s);

/* Return the physical page corresponding to a virtual one. Use it
   only for debugging because no protection checks are done. Return -1
   if no page found. */
target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);

#define CPU_LOG_TB_OUT_ASM (1 << 0)
#define CPU_LOG_TB_IN_ASM  (1 << 1)
#define CPU_LOG_TB_OP      (1 << 2)
#define CPU_LOG_TB_OP_OPT  (1 << 3)
#define CPU_LOG_INT        (1 << 4)
#define CPU_LOG_EXEC       (1 << 5)
#define CPU_LOG_PCALL      (1 << 6)
#define CPU_LOG_IOPORT     (1 << 7)
#define CPU_LOG_TB_CPU     (1 << 8)
#define CPU_LOG_RESET      (1 << 9)

/* define log items */
typedef struct CPULogItem {
    int mask;
    const char *name;
    const char *help;
} CPULogItem;

extern const CPULogItem cpu_log_items[];

void cpu_set_log(int log_flags);
void cpu_set_log_filename(const char *filename);
int cpu_str_to_log_mask(const char *str);

/* IO ports API */

/* NOTE: as these functions may even be used when there is an isa
   bridge on non x86 targets, we always define them */
#ifndef NO_CPU_IO_DEFS
void cpu_outb(CPUState *env, int addr, int val);
void cpu_outw(CPUState *env, int addr, int val);
void cpu_outl(CPUState *env, int addr, int val);
int cpu_inb(CPUState *env, int addr);
int cpu_inw(CPUState *env, int addr);
int cpu_inl(CPUState *env, int addr);
#endif

/* address in the RAM (different from a physical address) */
#ifdef USE_KQEMU
typedef uint32_t ram_addr_t;
#else
typedef unsigned long ram_addr_t;
#endif

/* memory API */

extern ram_addr_t phys_ram_size;
extern int phys_ram_fd;
extern uint8_t *phys_ram_base;
extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;

/* physical memory access */

/* MMIO pages are identified by a combination of an IO device index and
   3 flags.  The ROMD code stores the page ram offset in the iotlb entry,
   so only a limited number of ids are available.  */
#define IO_MEM_SHIFT      3
#define IO_MEM_NB_ENTRIES (1 << (TARGET_PAGE_BITS - IO_MEM_SHIFT))

#define IO_MEM_RAM        (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM        (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY   (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written.  */
#define IO_MEM_ROMD       (1)
#define IO_MEM_SUBPAGE    (2)
#define IO_MEM_SUBWIDTH   (4)

/* Flags stored in the low bits of the TLB virtual address.  These are
   defined so that fast path ram access is all zeros.  */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK (1 << 3)
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY     (1 << 4)
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO         (1 << 5)

typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                         ram_addr_t size,
                                         ram_addr_t phys_offset,
                                         ram_addr_t region_offset);
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}

ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
ram_addr_t qemu_ram_alloc(ram_addr_t);
void qemu_ram_free(ram_addr_t addr);
int cpu_register_io_memory(int io_index,
                           CPUReadMemoryFunc **mem_read,
                           CPUWriteMemoryFunc **mem_write,
                           void *opaque);
void cpu_unregister_io_memory(int table_address);
CPUWriteMemoryFunc **cpu_get_io_memory_write(int io_index);
CPUReadMemoryFunc **cpu_get_io_memory_read(int io_index);
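
/* Illustrative sketch (not part of the original header) of wiring a simple
 * MMIO region: three read and three write callbacks (8/16/32 bit) registered
 * with cpu_register_io_memory, then mapped with cpu_register_physical_memory.
 * The callback names, the base address and the assumption that io_index 0
 * requests a free index are hypothetical.
 *
 *     static CPUReadMemoryFunc *my_read[3] = {
 *         my_readb, my_readw, my_readl,
 *     };
 *     static CPUWriteMemoryFunc *my_write[3] = {
 *         my_writeb, my_writew, my_writel,
 *     };
 *
 *     int io = cpu_register_io_memory(0, my_read, my_write, opaque);
 *     cpu_register_physical_memory(0xf0000000, 0x1000, io);
 *
 * Plain RAM would instead be mapped with an offset obtained from
 * qemu_ram_alloc(), e.g.
 *
 *     cpu_register_physical_memory(0, ram_size,
 *                                  qemu_ram_alloc(ram_size) | IO_MEM_RAM);
 */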

void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
static inline void cpu_physical_memory_read(target_phys_addr_t addr,
                                            uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, buf, len, 0);
}
static inline void cpu_physical_memory_write(target_phys_addr_t addr,
                                             const uint8_t *buf, int len)
{
    cpu_physical_memory_rw(addr, (uint8_t *)buf, len, 1);
}
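
/* Illustrative usage (not part of the original header): copying a buffer into
 * guest physical memory and reading it back; "dest_pa" and the buffer are
 * hypothetical.
 *
 *     uint8_t data[16] = { 0 };
 *     cpu_physical_memory_write(dest_pa, data, sizeof(data));
 *     cpu_physical_memory_read(dest_pa, data, sizeof(data));
 *     uint32_t w = ldl_phys(dest_pa);   // single 32 bit read in target endianness
 */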

void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);

uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);

void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
int cpu_memory_rw_debug(CPUState *env, target_ulong addr,
                        uint8_t *buf, int len, int is_write);

#define VGA_DIRTY_FLAG       0x01
#define CODE_DIRTY_FLAG      0x02
#define KQEMU_DIRTY_FLAG     0x04
#define MIGRATION_DIRTY_FLAG 0x08

/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
}

static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
                                                int dirty_flags)
{
    return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}

static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
    phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
}

void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
                                     int dirty_flags);
void cpu_tlb_update_dirty(CPUState *env);

int cpu_physical_memory_set_dirty_tracking(int enable);
int cpu_physical_memory_get_dirty_tracking(void);

void cpu_physical_sync_dirty_bitmap(target_phys_addr_t start_addr, target_phys_addr_t end_addr);

void dump_exec_info(FILE *f,
                    int (*cpu_fprintf)(FILE *f, const char *fmt, ...));

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free.  This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);

/*******************************************/
/* host CPU ticks (if available) */

#if defined(_ARCH_PPC)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t retval;
#ifdef _ARCH_PPC64
    /* This reads timebase in one 64bit go and includes Cell workaround from:
       http://ozlabs.org/pipermail/linuxppc-dev/2006-October/027052.html
    */
    __asm__ __volatile__ (
        "mftb    %0\n\t"
        "cmpwi   %0,0\n\t"
        "beq-    $-8"
        : "=r" (retval));
#else
    /* http://ozlabs.org/pipermail/linuxppc-dev/1999-October/003889.html */
    unsigned long junk;
    __asm__ __volatile__ (
        "mftbu   %1\n\t"
        "mftb    %L0\n\t"
        "mftbu   %0\n\t"
        "cmpw    %0,%1\n\t"
        "bne     $-16"
        : "=r" (retval), "=r" (junk));
#endif
    return retval;
}

#elif defined(__i386__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("rdtsc" : "=A" (val));
    return val;
}

#elif defined(__x86_64__)

static inline int64_t cpu_get_real_ticks(void)
{
    uint32_t low, high;
    int64_t val;
    asm volatile("rdtsc" : "=a" (low), "=d" (high));
    val = high;
    val <<= 32;
    val |= low;
    return val;
}

#elif defined(__hppa__)

static inline int64_t cpu_get_real_ticks(void)
{
    int val;
    asm volatile ("mfctl %%cr16, %0" : "=r"(val));
    return val;
}

#elif defined(__ia64)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile ("mov %0 = ar.itc" : "=r"(val) :: "memory");
    return val;
}

#elif defined(__s390__)

static inline int64_t cpu_get_real_ticks(void)
{
    int64_t val;
    asm volatile("stck 0(%1)" : "=m" (val) : "a" (&val) : "cc");
    return val;
}

#elif defined(__sparc_v8plus__) || defined(__sparc_v8plusa__) || defined(__sparc_v9__)

static inline int64_t cpu_get_real_ticks (void)
{
#if defined(_LP64)
    uint64_t rval;
    asm volatile("rd %%tick,%0" : "=r"(rval));
    return rval;
#else
    union {
        uint64_t i64;
        struct {
            uint32_t high;
            uint32_t low;
        } i32;
    } rval;
    asm volatile("rd %%tick,%1; srlx %1,32,%0"
                 : "=r"(rval.i32.high), "=r"(rval.i32.low));
    return rval.i64;
#endif
}

#elif defined(__mips__)

static inline int64_t cpu_get_real_ticks(void)
{
#if __mips_isa_rev >= 2
    uint32_t count;
    static uint32_t cyc_per_count = 0;

    if (!cyc_per_count)
        __asm__ __volatile__("rdhwr %0, $3" : "=r" (cyc_per_count));

    __asm__ __volatile__("rdhwr %1, $2" : "=r" (count));
    return (int64_t)(count * cyc_per_count);
#else
    /* FIXME */
    static int64_t ticks = 0;
    return ticks++;
#endif
}

#else
/* The host CPU doesn't have an easily accessible cycle counter.
   Just return a monotonically increasing value.  This will be
   totally wrong, but hopefully better than nothing.  */
static inline int64_t cpu_get_real_ticks (void)
{
    static int64_t ticks = 0;
    return ticks++;
}
#endif

/* profiling */
#ifdef CONFIG_PROFILER
static inline int64_t profile_getclock(void)
{
    return cpu_get_real_ticks();
}

extern int64_t kqemu_time, kqemu_time_start;
extern int64_t qemu_time, qemu_time_start;
extern int64_t tlb_flush_time;
extern int64_t kqemu_exec_count;
extern int64_t dev_time;
extern int64_t kqemu_ret_int_count;
extern int64_t kqemu_ret_excp_count;
extern int64_t kqemu_ret_intr_count;
#endif

#endif /* CPU_ALL_H */