/*
 * defines common to all virtual CPUs
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#ifndef CPU_ALL_H
#define CPU_ALL_H

#include "exec/page-protection.h"
#include "exec/cpu-common.h"
#include "exec/memory.h"
#include "exec/tswap.h"
#include "hw/core/cpu.h"
/* some important defines:
 *
 * HOST_BIG_ENDIAN : whether the host CPU is big endian
 * (otherwise little endian)
 *
 * TARGET_BIG_ENDIAN : same for the target CPU
 */
#if HOST_BIG_ENDIAN != TARGET_BIG_ENDIAN
#define BSWAP_NEEDED
#endif

/* Target-endianness CPU memory access functions.  These fit into the
 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
 */
#if TARGET_BIG_ENDIAN
#define lduw_p(p)        lduw_be_p(p)
#define ldsw_p(p)        ldsw_be_p(p)
#define ldl_p(p)         ldl_be_p(p)
#define ldq_p(p)         ldq_be_p(p)
#define stw_p(p, v)      stw_be_p(p, v)
#define stl_p(p, v)      stl_be_p(p, v)
#define stq_p(p, v)      stq_be_p(p, v)
#define ldn_p(p, sz)     ldn_be_p(p, sz)
#define stn_p(p, sz, v)  stn_be_p(p, sz, v)
#else
#define lduw_p(p)        lduw_le_p(p)
#define ldsw_p(p)        ldsw_le_p(p)
#define ldl_p(p)         ldl_le_p(p)
#define ldq_p(p)         ldq_le_p(p)
#define stw_p(p, v)      stw_le_p(p, v)
#define stl_p(p, v)      stl_le_p(p, v)
#define stq_p(p, v)      stq_le_p(p, v)
#define ldn_p(p, sz)     ldn_le_p(p, sz)
#define stn_p(p, sz, v)  stn_le_p(p, sz, v)
#endif
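
/*
 * Illustration (editor's sketch, not part of the original header): the
 * macros above resolve to the byte order of the *guest*, independent of
 * the host.  For example, with a big-endian target:
 *
 *     uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };
 *     uint32_t v = ldl_p(buf);     // v == 0x12345678 (via ldl_be_p)
 *     stw_p(buf, 0xabcd);          // stores 0xab, 0xcd (via stw_be_p)
 *
 * On a little-endian target the same calls would yield 0x78563412 and
 * store 0xcd, 0xab respectively.
 */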
/* MMU memory access macros */

#if !defined(CONFIG_USER_ONLY)

#include "exec/hwaddr.h"

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

#define SUFFIX       _cached_slow
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst.h.inc"

static inline void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val)
{
    address_space_stl_notdirty(as, addr, val,
                               MEMTXATTRS_UNSPECIFIED, NULL);
}

#define SUFFIX
#define ARG1         as
#define ARG1_DECL    AddressSpace *as
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

/* Inline fast path for direct RAM access.  */
#define ENDIANNESS
#include "exec/memory_ldst_cached.h.inc"

#define SUFFIX       _cached
#define ARG1         cache
#define ARG1_DECL    MemoryRegionCache *cache
#define TARGET_ENDIANNESS
#include "exec/memory_ldst_phys.h.inc"

#endif
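
/*
 * Illustration (editor's note): each inclusion above stamps out a family
 * of load/store helpers named from SUFFIX and ARG1, available in softmmu
 * builds only.  With the definitions just before it,
 * "exec/memory_ldst_phys.h.inc" yields target-endian accessors such as:
 *
 *     uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
 *     void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val);
 *
 * while the _cached variants take a MemoryRegionCache * in place of the
 * AddressSpace *.
 */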
/* page related stuff */

#include "exec/cpu-defs.h"
#include "exec/target_page.h"

CPUArchState *cpu_copy(CPUArchState *env);

/* Flags for use in cpu->interrupt_request (historically referred to as
   ENV->INTERRUPT_PENDING).
   The numbers assigned here are non-sequential in order to preserve
   binary compatibility with the vmstate dump.  Bit 0 (0x0001) was
   previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
   the vmstate dump.  */
/* External hardware interrupt pending.  This is typically used for
   interrupts from devices.  */
#define CPU_INTERRUPT_HARD        0x0002

/* Exit the current TB.  This is typically used when some system-level device
   makes some change to the memory mapping, e.g. an A20 line change.  */
#define CPU_INTERRUPT_EXITTB      0x0004

/* Halt the CPU.  */
#define CPU_INTERRUPT_HALT        0x0020

/* Debug event pending.  */
#define CPU_INTERRUPT_DEBUG       0x0080

/* Reset signal.  */
#define CPU_INTERRUPT_RESET       0x0400

/* Several target-specific external hardware interrupts.  Each target/cpu.h
   should define proper names based on these defines.  */
#define CPU_INTERRUPT_TGT_EXT_0   0x0008
#define CPU_INTERRUPT_TGT_EXT_1   0x0010
#define CPU_INTERRUPT_TGT_EXT_2   0x0040
#define CPU_INTERRUPT_TGT_EXT_3   0x0200
#define CPU_INTERRUPT_TGT_EXT_4   0x1000

/* Several target-specific internal interrupts.  These differ from the
   preceding target-specific interrupts in that they are intended to
   originate from within the cpu itself, typically in response to some
   instruction being executed.  These, therefore, are not masked while
   single-stepping within the debugger.  */
#define CPU_INTERRUPT_TGT_INT_0   0x0100
#define CPU_INTERRUPT_TGT_INT_1   0x0800
#define CPU_INTERRUPT_TGT_INT_2   0x2000

/* First unused bit: 0x4000.  */

/* The set of all bits that should be masked when single-stepping.  */
#define CPU_INTERRUPT_SSTEP_MASK \
    (CPU_INTERRUPT_HARD          \
     | CPU_INTERRUPT_TGT_EXT_0   \
     | CPU_INTERRUPT_TGT_EXT_1   \
     | CPU_INTERRUPT_TGT_EXT_2   \
     | CPU_INTERRUPT_TGT_EXT_3   \
     | CPU_INTERRUPT_TGT_EXT_4)
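
/*
 * Illustration (editor's sketch; cpu_interrupt() and cpu_reset_interrupt()
 * are declared in hw/core/cpu.h):
 *
 *     // A device model asserts an external interrupt line:
 *     cpu_interrupt(cs, CPU_INTERRUPT_HARD);
 *
 *     // The target's interrupt handling later tests and clears it:
 *     if (cs->interrupt_request & CPU_INTERRUPT_HARD) {
 *         cpu_reset_interrupt(cs, CPU_INTERRUPT_HARD);
 *         ...
 *     }
 */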
  137. #include "cpu.h"
  138. #ifdef CONFIG_USER_ONLY
  139. static inline int cpu_mmu_index(CPUState *cs, bool ifetch);
  140. /*
  141. * Allow some level of source compatibility with softmmu. We do not
  142. * support any of the more exotic features, so only invalid pages may
  143. * be signaled by probe_access_flags().
  144. */
  145. #define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS_MIN - 1))
  146. #define TLB_MMIO (1 << (TARGET_PAGE_BITS_MIN - 2))
  147. #define TLB_WATCHPOINT 0
  148. static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
  149. {
  150. return MMU_USER_IDX;
  151. }
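
/*
 * Illustration (editor's note): in a user-only build every access uses
 * the single user-mode MMU index:
 *
 *     int idx = cpu_mmu_index(cs, false);   // always MMU_USER_IDX
 *
 * and the flag definitions above exist mainly so that softmmu-oriented
 * code that tests them still compiles.
 */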
#else

/*
 * Flags stored in the low bits of the TLB virtual address.
 * These are defined so that fast path ram access is all zeros.
 * All of the flags must lie between TARGET_PAGE_BITS and the
 * maximum address alignment bit.
 *
 * Use TARGET_PAGE_BITS_MIN so that these bits are constant
 * when TARGET_PAGE_BITS_VARY is in effect.
 *
 * The count, if not the placement, of these bits is known
 * to tcg/tcg-op-ldst.c, check_max_alignment().
 */
/* Zero if TLB entry is valid.  */
#define TLB_INVALID_MASK    (1 << (TARGET_PAGE_BITS_MIN - 1))
/* Set if TLB entry references a clean RAM page.  The iotlb entry will
   contain the page physical address.  */
#define TLB_NOTDIRTY        (1 << (TARGET_PAGE_BITS_MIN - 2))
/* Set if TLB entry is an IO callback.  */
#define TLB_MMIO            (1 << (TARGET_PAGE_BITS_MIN - 3))
/* Set if TLB entry writes are ignored.  */
#define TLB_DISCARD_WRITE   (1 << (TARGET_PAGE_BITS_MIN - 4))
/* Set if the slow path must be used; more flags in CPUTLBEntryFull.  */
#define TLB_FORCE_SLOW      (1 << (TARGET_PAGE_BITS_MIN - 5))

/*
 * Use this mask to check interception with an alignment mask
 * in a TCG backend.
 */
#define TLB_FLAGS_MASK \
    (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO \
     | TLB_FORCE_SLOW | TLB_DISCARD_WRITE)
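
/*
 * Illustration (editor's sketch of how cputlb-style code consumes these
 * bits): a load fast path compares the page-aligned address and checks
 * that no flag bits intrude, e.g.
 *
 *     uint64_t cmp = tlb_entry->addr_read;
 *     if ((addr & TARGET_PAGE_MASK) == (cmp & TARGET_PAGE_MASK)
 *         && !(cmp & TLB_FLAGS_MASK)) {
 *         // direct RAM access
 *     } else {
 *         // TLB refill or slow path (MMIO, notdirty, ...)
 *     }
 */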
/*
 * Flags stored in CPUTLBEntryFull.slow_flags[x].
 * TLB_FORCE_SLOW must be set in CPUTLBEntry.addr_idx[x].
 */
/* Set if TLB entry requires byte swap.  */
#define TLB_BSWAP            (1 << 0)
/* Set if TLB entry contains a watchpoint.  */
#define TLB_WATCHPOINT       (1 << 1)
/* Set if TLB entry requires aligned accesses.  */
#define TLB_CHECK_ALIGNED    (1 << 2)

#define TLB_SLOW_FLAGS_MASK  (TLB_BSWAP | TLB_WATCHPOINT | TLB_CHECK_ALIGNED)

/* The two sets of flags must not overlap.  */
QEMU_BUILD_BUG_ON(TLB_FLAGS_MASK & TLB_SLOW_FLAGS_MASK);
/**
 * tlb_hit_page: return true if page aligned @addr is a hit against the
 * TLB entry @tlb_addr
 *
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 * @addr: virtual address to test (must be page aligned)
 */
static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
{
    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
}

/**
 * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
 *
 * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
 * @addr: virtual address to test (need not be page aligned)
 */
static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
{
    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
}
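
/*
 * Illustration (editor's sketch): because TLB_INVALID_MASK is kept in the
 * comparison, an invalid entry can never compare equal to a page-aligned
 * address, so a lookup reduces to:
 *
 *     if (tlb_hit(entry->addr_read, addr)) {
 *         // fast path: entry covers this virtual address
 *     }
 */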
#endif /* !CONFIG_USER_ONLY */

/* Validate correct placement of CPUArchState. */
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, parent_obj) != 0);
QEMU_BUILD_BUG_ON(offsetof(ArchCPU, env) != sizeof(CPUState));

#endif /* CPU_ALL_H */