exec-all.h 8.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257
  1. /*
  2. * internal execution defines for qemu
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #ifndef EXEC_ALL_H
  20. #define EXEC_ALL_H
  21. #include "cpu.h"
  22. #if defined(CONFIG_USER_ONLY)
  23. #include "exec/cpu_ldst.h"
  24. #endif
  25. #include "exec/mmu-access-type.h"
  26. #include "exec/translation-block.h"
  27. #if defined(CONFIG_TCG)
  28. /**
  29. * probe_access:
  30. * @env: CPUArchState
  31. * @addr: guest virtual address to look up
  32. * @size: size of the access
  33. * @access_type: read, write or execute permission
  34. * @mmu_idx: MMU index to use for lookup
  35. * @retaddr: return address for unwinding
  36. *
  37. * Look up the guest virtual address @addr. Raise an exception if the
  38. * page does not satisfy @access_type. Raise an exception if the
  39. * access (@addr, @size) hits a watchpoint. For writes, mark a clean
  40. * page as dirty.
  41. *
  42. * Finally, return the host address for a page that is backed by RAM,
  43. * or NULL if the page requires I/O.
  44. */
  45. void *probe_access(CPUArchState *env, vaddr addr, int size,
  46. MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
  47. static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
  48. int mmu_idx, uintptr_t retaddr)
  49. {
  50. return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
  51. }
  52. static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
  53. int mmu_idx, uintptr_t retaddr)
  54. {
  55. return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
  56. }
  57. /**
  58. * probe_access_flags:
  59. * @env: CPUArchState
  60. * @addr: guest virtual address to look up
  61. * @size: size of the access
  62. * @access_type: read, write or execute permission
  63. * @mmu_idx: MMU index to use for lookup
  64. * @nonfault: suppress the fault
  65. * @phost: return value for host address
  66. * @retaddr: return address for unwinding
  67. *
  68. * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
  69. * the page, and storing the host address for RAM in @phost.
  70. *
  71. * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
  72. * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
  73. * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
  74. * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
  75. */
  76. int probe_access_flags(CPUArchState *env, vaddr addr, int size,
  77. MMUAccessType access_type, int mmu_idx,
  78. bool nonfault, void **phost, uintptr_t retaddr);
  79. #ifndef CONFIG_USER_ONLY
  80. /**
  81. * probe_access_full:
  82. * Like probe_access_flags, except also return into @pfull.
  83. *
  84. * The CPUTLBEntryFull structure returned via @pfull is transient
  85. * and must be consumed or copied immediately, before any further
  86. * access or changes to TLB @mmu_idx.
  87. *
  88. * This function will not fault if @nonfault is set, but will
  89. * return TLB_INVALID_MASK if the page is not mapped, or is not
  90. * accessible with @access_type.
  91. *
  92. * This function will return TLB_MMIO in order to force the access
  93. * to be handled out-of-line if plugins wish to instrument the access.
  94. */
  95. int probe_access_full(CPUArchState *env, vaddr addr, int size,
  96. MMUAccessType access_type, int mmu_idx,
  97. bool nonfault, void **phost,
  98. CPUTLBEntryFull **pfull, uintptr_t retaddr);
  99. /**
  100. * probe_access_full_mmu:
  101. * Like probe_access_full, except:
  102. *
  103. * This function is intended to be used for page table accesses by
  104. * the target mmu itself. Since such page walking happens while
  105. * handling another potential mmu fault, this function never raises
  106. * exceptions (akin to @nonfault true for probe_access_full).
  107. * Likewise this function does not trigger plugin instrumentation.
  108. */
  109. int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
  110. MMUAccessType access_type, int mmu_idx,
  111. void **phost, CPUTLBEntryFull **pfull);
  112. #endif /* !CONFIG_USER_ONLY */
  113. #endif /* CONFIG_TCG */
  114. static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
  115. {
  116. #ifdef CONFIG_USER_ONLY
  117. return tb->itree.start;
  118. #else
  119. return tb->page_addr[0];
  120. #endif
  121. }
  122. static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
  123. {
  124. #ifdef CONFIG_USER_ONLY
  125. tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
  126. return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
  127. #else
  128. return tb->page_addr[1];
  129. #endif
  130. }
  131. static inline void tb_set_page_addr0(TranslationBlock *tb,
  132. tb_page_addr_t addr)
  133. {
  134. #ifdef CONFIG_USER_ONLY
  135. tb->itree.start = addr;
  136. /*
  137. * To begin, we record an interval of one byte. When the translation
  138. * loop encounters a second page, the interval will be extended to
  139. * include the first byte of the second page, which is sufficient to
  140. * allow tb_page_addr1() above to work properly. The final corrected
  141. * interval will be set by tb_page_add() from tb->size before the
  142. * node is added to the interval tree.
  143. */
  144. tb->itree.last = addr;
  145. #else
  146. tb->page_addr[0] = addr;
  147. #endif
  148. }
  149. static inline void tb_set_page_addr1(TranslationBlock *tb,
  150. tb_page_addr_t addr)
  151. {
  152. #ifdef CONFIG_USER_ONLY
  153. /* Extend the interval to the first byte of the second page. See above. */
  154. tb->itree.last = addr;
  155. #else
  156. tb->page_addr[1] = addr;
  157. #endif
  158. }
  159. /* TranslationBlock invalidate API */
  160. void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
  161. void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
  162. void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
  163. /* GETPC is the true target of the return instruction that we'll execute. */
  164. #if defined(CONFIG_TCG_INTERPRETER)
  165. extern __thread uintptr_t tci_tb_ptr;
  166. # define GETPC() tci_tb_ptr
  167. #else
  168. # define GETPC() \
  169. ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
  170. #endif
  171. /* The true return address will often point to a host insn that is part of
  172. the next translated guest insn. Adjust the address backward to point to
  173. the middle of the call insn. Subtracting one would do the job except for
  174. several compressed mode architectures (arm, mips) which set the low bit
  175. to indicate the compressed mode; subtracting two works around that. It
  176. is also the case that there are no host isas that contain a call insn
  177. smaller than 4 bytes, so we don't worry about special-casing this. */
  178. #define GETPC_ADJ 2
  179. #if !defined(CONFIG_USER_ONLY)
  180. /**
  181. * iotlb_to_section:
  182. * @cpu: CPU performing the access
  183. * @index: TCG CPU IOTLB entry
  184. *
  185. * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
  186. * it refers to. @index will have been initially created and returned
  187. * by memory_region_section_get_iotlb().
  188. */
  189. struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
  190. hwaddr index, MemTxAttrs attrs);
  191. #endif
  192. /**
  193. * get_page_addr_code_hostp()
  194. * @env: CPUArchState
  195. * @addr: guest virtual address of guest code
  196. *
  197. * See get_page_addr_code() (full-system version) for documentation on the
  198. * return value.
  199. *
  200. * Sets *@hostp (when @hostp is non-NULL) as follows.
  201. * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
  202. * to the host address where @addr's content is kept.
  203. *
  204. * Note: this function can trigger an exception.
  205. */
  206. tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
  207. void **hostp);
  208. /**
  209. * get_page_addr_code()
  210. * @env: CPUArchState
  211. * @addr: guest virtual address of guest code
  212. *
  213. * If we cannot translate and execute from the entire RAM page, or if
  214. * the region is not backed by RAM, returns -1. Otherwise, returns the
  215. * ram_addr_t corresponding to the guest code at @addr.
  216. *
  217. * Note: this function can trigger an exception.
  218. */
  219. static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
  220. vaddr addr)
  221. {
  222. return get_page_addr_code_hostp(env, addr, NULL);
  223. }
  224. #if !defined(CONFIG_USER_ONLY)
  225. MemoryRegionSection *
  226. address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
  227. hwaddr *xlat, hwaddr *plen,
  228. MemTxAttrs attrs, int *prot);
  229. hwaddr memory_region_section_get_iotlb(CPUState *cpu,
  230. MemoryRegionSection *section);
  231. #endif
  232. #endif