  1. /*
  2. * Common CPU TLB handling
  3. *
  4. * Copyright (c) 2003 Fabrice Bellard
  5. *
  6. * This library is free software; you can redistribute it and/or
  7. * modify it under the terms of the GNU Lesser General Public
  8. * License as published by the Free Software Foundation; either
  9. * version 2.1 of the License, or (at your option) any later version.
  10. *
  11. * This library is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  14. * Lesser General Public License for more details.
  15. *
  16. * You should have received a copy of the GNU Lesser General Public
  17. * License along with this library; if not, see <http://www.gnu.org/licenses/>.
  18. */
  19. #ifndef CPUTLB_H
  20. #define CPUTLB_H
  21. #include "exec/cpu-common.h"
  22. #include "exec/hwaddr.h"
  23. #include "exec/memattrs.h"
  24. #include "exec/vaddr.h"
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
/*
 * Write-protect / un-protect the RAM page containing @ram_addr, used to
 * detect writes to memory that holds translated code.
 * NOTE(review): bodies are not visible in this header — confirm exact
 * semantics against the definitions in cputlb.c.
 */
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
#endif
#ifndef CONFIG_USER_ONLY
/*
 * Dirty-memory tracking helpers: reset the dirty state of TLB entries
 * covering the given ram_addr_t range (per-CPU and all-CPUs variants).
 * NOTE(review): bodies are not visible in this header — see cputlb.c.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
#endif
/**
 * tlb_set_page_full:
 * @cpu: CPU context
 * @mmu_idx: mmu index of the tlb to modify
 * @addr: virtual address of the entry to add
 * @full: the details of the tlb entry
 *
 * Add an entry to @cpu tlb index @mmu_idx.  All of the fields of
 * @full must be filled, except for xlat_section, and constitute
 * the complete description of the translated page.
 *
 * This is generally called by the target tlb_fill function after
 * having performed a successful page table walk to find the physical
 * address and attributes for the translation.
 *
 * At most one entry for a given virtual address is permitted.  Only a
 * single TARGET_PAGE_SIZE region is mapped; @full->lg_page_size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_full(CPUState *cpu, int mmu_idx, vaddr addr,
                       CPUTLBEntryFull *full);
/**
 * tlb_set_page_with_attrs:
 * @cpu: CPU to add this TLB entry for
 * @addr: virtual address of page to add entry for
 * @paddr: physical address of the page
 * @attrs: memory transaction attributes
 * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits)
 * @mmu_idx: MMU index to insert TLB entry for
 * @size: size of the page in bytes
 *
 * Add an entry to this CPU's TLB (a mapping from virtual address
 * @addr to physical address @paddr) with the specified memory
 * transaction attributes.  This is generally called by the target CPU
 * specific code after it has been called through the tlb_fill()
 * entry point and performed a successful page table walk to find
 * the physical address and attributes for the virtual address
 * which provoked the TLB miss.
 *
 * At most one entry for a given virtual address is permitted.  Only a
 * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only
 * used by tlb_flush_page.
 */
void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                             hwaddr paddr, MemTxAttrs attrs,
                             int prot, int mmu_idx, vaddr size);
/**
 * tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED.  It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, vaddr addr,
                  hwaddr paddr, int prot,
                  int mmu_idx, vaddr size);
#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, vaddr addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU.  Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe.  If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Flush the entire TLB for all CPUs, for all MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
                              uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of all CPUs, for the specified
 * MMU indexes.
 *
 * When this function returns, no CPUs will subsequently perform
 * translations using the flushed TLBs.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_page_bits_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * Similar to tlb_flush_page_by_mmuidx, but only the low @bits of the
 * virtual address participate in the match.
 */
void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
                                   uint16_t idxmap, unsigned bits);
/* Similarly, with broadcast and syncing. */
void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                                   uint16_t idxmap,
                                                   unsigned bits);
/**
 * tlb_flush_range_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of the start of the range to be flushed
 * @len: length of range to be flushed
 * @idxmap: bitmap of mmu indexes to flush
 * @bits: number of significant bits in address
 *
 * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
 * comparing only the low @bits worth of each virtual page.
 */
void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                               vaddr len, uint16_t idxmap,
                               unsigned bits);
/* Similarly, with broadcast and syncing. */
void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                               vaddr addr,
                                               vaddr len,
                                               uint16_t idxmap,
                                               unsigned bits);
#else
/*
 * Without TCG system emulation there is no software TLB to flush, so
 * every flush operation degenerates to a no-op stub.  Keeping the full
 * set of prototypes here lets callers avoid #ifdef'ing each call site.
 */
static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            vaddr addr, uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            vaddr addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
                                                 vaddr addr,
                                                 uint16_t idxmap,
                                                 unsigned bits)
{
}
static inline void
tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
                                              uint16_t idxmap, unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
                                             vaddr len, uint16_t idxmap,
                                             unsigned bits)
{
}
static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                             vaddr addr,
                                                             vaddr len,
                                                             uint16_t idxmap,
                                                             unsigned bits)
{
}
#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
  266. #endif /* CPUTLB_H */