  1. /*
  2. * internal execution defines for qemu
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #ifndef EXEC_ALL_H
  20. #define EXEC_ALL_H
  21. #include "cpu.h"
  22. #if defined(CONFIG_USER_ONLY)
  23. #include "exec/cpu_ldst.h"
  24. #endif
  25. #include "exec/mmu-access-type.h"
  26. #include "exec/translation-block.h"
  27. #if defined(CONFIG_TCG)
  28. #include "accel/tcg/getpc.h"
  29. /**
  30. * probe_access:
  31. * @env: CPUArchState
  32. * @addr: guest virtual address to look up
  33. * @size: size of the access
  34. * @access_type: read, write or execute permission
  35. * @mmu_idx: MMU index to use for lookup
  36. * @retaddr: return address for unwinding
  37. *
  38. * Look up the guest virtual address @addr. Raise an exception if the
  39. * page does not satisfy @access_type. Raise an exception if the
  40. * access (@addr, @size) hits a watchpoint. For writes, mark a clean
  41. * page as dirty.
  42. *
  43. * Finally, return the host address for a page that is backed by RAM,
  44. * or NULL if the page requires I/O.
  45. */
  46. void *probe_access(CPUArchState *env, vaddr addr, int size,
  47. MMUAccessType access_type, int mmu_idx, uintptr_t retaddr);
  48. static inline void *probe_write(CPUArchState *env, vaddr addr, int size,
  49. int mmu_idx, uintptr_t retaddr)
  50. {
  51. return probe_access(env, addr, size, MMU_DATA_STORE, mmu_idx, retaddr);
  52. }
  53. static inline void *probe_read(CPUArchState *env, vaddr addr, int size,
  54. int mmu_idx, uintptr_t retaddr)
  55. {
  56. return probe_access(env, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
  57. }
  58. /**
  59. * probe_access_flags:
  60. * @env: CPUArchState
  61. * @addr: guest virtual address to look up
  62. * @size: size of the access
  63. * @access_type: read, write or execute permission
  64. * @mmu_idx: MMU index to use for lookup
  65. * @nonfault: suppress the fault
  66. * @phost: return value for host address
  67. * @retaddr: return address for unwinding
  68. *
  69. * Similar to probe_access, loosely returning the TLB_FLAGS_MASK for
  70. * the page, and storing the host address for RAM in @phost.
  71. *
  72. * If @nonfault is set, do not raise an exception but return TLB_INVALID_MASK.
  73. * Do not handle watchpoints, but include TLB_WATCHPOINT in the returned flags.
 * Do handle clean pages, so exclude TLB_NOTDIRTY from the returned flags.
  75. * For simplicity, all "mmio-like" flags are folded to TLB_MMIO.
  76. */
  77. int probe_access_flags(CPUArchState *env, vaddr addr, int size,
  78. MMUAccessType access_type, int mmu_idx,
  79. bool nonfault, void **phost, uintptr_t retaddr);
  80. #ifndef CONFIG_USER_ONLY
  81. /**
  82. * probe_access_full:
  83. * Like probe_access_flags, except also return into @pfull.
  84. *
  85. * The CPUTLBEntryFull structure returned via @pfull is transient
  86. * and must be consumed or copied immediately, before any further
  87. * access or changes to TLB @mmu_idx.
  88. *
  89. * This function will not fault if @nonfault is set, but will
  90. * return TLB_INVALID_MASK if the page is not mapped, or is not
  91. * accessible with @access_type.
  92. *
  93. * This function will return TLB_MMIO in order to force the access
  94. * to be handled out-of-line if plugins wish to instrument the access.
  95. */
  96. int probe_access_full(CPUArchState *env, vaddr addr, int size,
  97. MMUAccessType access_type, int mmu_idx,
  98. bool nonfault, void **phost,
  99. CPUTLBEntryFull **pfull, uintptr_t retaddr);
  100. /**
  101. * probe_access_full_mmu:
  102. * Like probe_access_full, except:
  103. *
  104. * This function is intended to be used for page table accesses by
  105. * the target mmu itself. Since such page walking happens while
  106. * handling another potential mmu fault, this function never raises
  107. * exceptions (akin to @nonfault true for probe_access_full).
  108. * Likewise this function does not trigger plugin instrumentation.
  109. */
  110. int probe_access_full_mmu(CPUArchState *env, vaddr addr, int size,
  111. MMUAccessType access_type, int mmu_idx,
  112. void **phost, CPUTLBEntryFull **pfull);
  113. #endif /* !CONFIG_USER_ONLY */
  114. #endif /* CONFIG_TCG */
/*
 * tb_page_addr0() - guest address of the first page covered by @tb.
 *
 * User-only builds keep the TB's range in an interval tree node
 * (tb->itree); system builds keep an explicit page_addr[] pair.
 */
static inline tb_page_addr_t tb_page_addr0(const TranslationBlock *tb)
{
#ifdef CONFIG_USER_ONLY
    return tb->itree.start;
#else
    return tb->page_addr[0];
#endif
}
  123. static inline tb_page_addr_t tb_page_addr1(const TranslationBlock *tb)
  124. {
  125. #ifdef CONFIG_USER_ONLY
  126. tb_page_addr_t next = tb->itree.last & TARGET_PAGE_MASK;
  127. return next == (tb->itree.start & TARGET_PAGE_MASK) ? -1 : next;
  128. #else
  129. return tb->page_addr[1];
  130. #endif
  131. }
/*
 * tb_set_page_addr0() - record the first guest page covered by @tb.
 */
static inline void tb_set_page_addr0(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    tb->itree.start = addr;
    /*
     * To begin, we record an interval of one byte. When the translation
     * loop encounters a second page, the interval will be extended to
     * include the first byte of the second page, which is sufficient to
     * allow tb_page_addr1() above to work properly. The final corrected
     * interval will be set by tb_page_add() from tb->size before the
     * node is added to the interval tree.
     */
    tb->itree.last = addr;
#else
    tb->page_addr[0] = addr;
#endif
}
/*
 * tb_set_page_addr1() - record the second guest page covered by @tb.
 */
static inline void tb_set_page_addr1(TranslationBlock *tb,
                                     tb_page_addr_t addr)
{
#ifdef CONFIG_USER_ONLY
    /* Extend the interval to the first byte of the second page. See above. */
    tb->itree.last = addr;
#else
    tb->page_addr[1] = addr;
#endif
}
  160. /* TranslationBlock invalidate API */
  161. void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
  162. void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last);
  163. void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);
  164. #if !defined(CONFIG_USER_ONLY)
  165. /**
  166. * iotlb_to_section:
  167. * @cpu: CPU performing the access
  168. * @index: TCG CPU IOTLB entry
  169. *
  170. * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
  171. * it refers to. @index will have been initially created and returned
  172. * by memory_region_section_get_iotlb().
  173. */
  174. struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
  175. hwaddr index, MemTxAttrs attrs);
  176. #endif
  177. /**
  178. * get_page_addr_code_hostp()
  179. * @env: CPUArchState
  180. * @addr: guest virtual address of guest code
  181. *
  182. * See get_page_addr_code() (full-system version) for documentation on the
  183. * return value.
  184. *
  185. * Sets *@hostp (when @hostp is non-NULL) as follows.
  186. * If the return value is -1, sets *@hostp to NULL. Otherwise, sets *@hostp
  187. * to the host address where @addr's content is kept.
  188. *
  189. * Note: this function can trigger an exception.
  190. */
  191. tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
  192. void **hostp);
  193. /**
  194. * get_page_addr_code()
  195. * @env: CPUArchState
  196. * @addr: guest virtual address of guest code
  197. *
  198. * If we cannot translate and execute from the entire RAM page, or if
  199. * the region is not backed by RAM, returns -1. Otherwise, returns the
  200. * ram_addr_t corresponding to the guest code at @addr.
  201. *
  202. * Note: this function can trigger an exception.
  203. */
  204. static inline tb_page_addr_t get_page_addr_code(CPUArchState *env,
  205. vaddr addr)
  206. {
  207. return get_page_addr_code_hostp(env, addr, NULL);
  208. }
  209. #if !defined(CONFIG_USER_ONLY)
  210. MemoryRegionSection *
  211. address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
  212. hwaddr *xlat, hwaddr *plen,
  213. MemTxAttrs attrs, int *prot);
  214. hwaddr memory_region_section_get_iotlb(CPUState *cpu,
  215. MemoryRegionSection *section);
  216. #endif
  217. #endif