@@ -25,21 +25,14 @@
#include "exec/memattrs.h"
#include "exec/vaddr.h"
|
|
|
|
|
|
-#ifdef CONFIG_TCG
-
-#if !defined(CONFIG_USER_ONLY)
-/* cputlb.c */
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
void tlb_protect_code(ram_addr_t ram_addr);
void tlb_unprotect_code(ram_addr_t ram_addr);
#endif

-#endif /* CONFIG_TCG */
-
#ifndef CONFIG_USER_ONLY
-
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_reset_dirty_range_all(ram_addr_t start, ram_addr_t length);
-
#endif

/**
@@ -101,4 +94,192 @@ void tlb_set_page(CPUState *cpu, vaddr addr,
hwaddr paddr, int prot,
int mmu_idx, vaddr size);

-#endif
+#if defined(CONFIG_TCG) && !defined(CONFIG_USER_ONLY)
+/**
+ * tlb_flush_page:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of the specified CPU, for all
+ * MMU indexes.
+ */
+void tlb_flush_page(CPUState *cpu, vaddr addr);
+
+/**
+ * tlb_flush_page_all_cpus_synced:
+ * @src: source CPU of the flush
+ * @addr: virtual address of page to be flushed
+ *
+ * Flush one page from the TLB of all CPUs, for all
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr);
+
+/**
+ * tlb_flush:
+ * @cpu: CPU whose TLB should be flushed
+ *
+ * Flush the entire TLB for the specified CPU. Most CPU architectures
+ * allow the implementation to drop entries from the TLB at any time
+ * so this is generally safe. If more selective flushing is required
+ * use one of the other functions for efficiency.
+ */
+void tlb_flush(CPUState *cpu);
+
+/**
+ * tlb_flush_all_cpus_synced:
+ * @src_cpu: source CPU of the flush
+ *
+ * Flush the entire TLB for all CPUs, for all MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_all_cpus_synced(CPUState *src_cpu);
+
+/**
+ * tlb_flush_page_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_page_by_mmuidx(CPUState *cpu, vaddr addr,
+ uint16_t idxmap);
+
+/**
+ * tlb_flush_page_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush one page from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap);
+
+/**
+ * tlb_flush_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from the TLB of the specified CPU, for the specified
+ * MMU indexes.
+ */
+void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_by_mmuidx_all_cpus_synced:
+ * @cpu: Originating CPU of the flush
+ * @idxmap: bitmap of MMU indexes to flush
+ *
+ * Flush all entries from the TLB of all CPUs, for the specified
+ * MMU indexes.
+ *
+ * When this function returns, no CPUs will subsequently perform
+ * translations using the flushed TLBs.
+ */
+void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+
+/**
+ * tlb_flush_page_bits_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of page to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * Like tlb_flush_page_by_mmuidx, but only the low @bits of @addr are compared.
+ */
+void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap,
+ unsigned bits);
+
+/**
+ * tlb_flush_range_by_mmuidx:
+ * @cpu: CPU whose TLB should be flushed
+ * @addr: virtual address of the start of the range to be flushed
+ * @len: length of range to be flushed
+ * @idxmap: bitmap of mmu indexes to flush
+ * @bits: number of significant bits in address
+ *
+ * For each mmuidx in @idxmap, flush all pages within [@addr,@addr+@len),
+ * comparing only the low @bits worth of each virtual page.
+ */
+void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits);
+
+/* Similarly, with broadcast and syncing. */
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits);
+#else
+static inline void tlb_flush_page(CPUState *cpu, vaddr addr)
+{
+}
+static inline void tlb_flush_page_all_cpus_synced(CPUState *src, vaddr addr)
+{
+}
+static inline void tlb_flush(CPUState *cpu)
+{
+}
+static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
+{
+}
+static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
+ vaddr addr, uint16_t idxmap)
+{
+}
+
+static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ uint16_t idxmap)
+{
+}
+static inline void tlb_flush_page_bits_by_mmuidx(CPUState *cpu,
+ vaddr addr,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void
+tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *cpu, vaddr addr,
+ uint16_t idxmap, unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx(CPUState *cpu, vaddr addr,
+ vaddr len, uint16_t idxmap,
+ unsigned bits)
+{
+}
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+ vaddr addr,
+ vaddr len,
+ uint16_t idxmap,
+ unsigned bits)
+{
+}
+#endif /* CONFIG_TCG && !CONFIG_USER_ONLY */
+#endif /* CPUTLB_H */
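
For context, here is a minimal sketch of how a target might drive the flush interfaces declared above when the guest invalidates a translation entry. It assumes the header being patched is exec/cputlb.h; the helper name my_target_invalidate_page, the MY_MMU_IDX_* bits and the 48-bit match width are made up for illustration and are not part of this patch.

/* Hypothetical usage sketch; not part of the patch above. */
#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "exec/cputlb.h"

#define MY_MMU_IDX_USER    (1u << 0)   /* illustrative mmu-index bits */
#define MY_MMU_IDX_KERNEL  (1u << 1)

static void my_target_invalidate_page(CPUState *cs, vaddr va, bool global)
{
    if (global) {
        /* Broadcast flush: when this returns, no CPU can still translate
         * through the stale entry in either index. */
        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, va,
                                                 MY_MMU_IDX_USER |
                                                 MY_MMU_IDX_KERNEL);
    } else {
        /* Local flush of the user index only, matching only the low
         * 48 bits of the virtual address (illustrative width). */
        tlb_flush_page_bits_by_mmuidx(cs, va, MY_MMU_IDX_USER, 48);
    }
}

In a real target this would sit in code built with TCG and softmmu; in a CONFIG_USER_ONLY or non-TCG build the same calls resolve to the empty static inline stubs declared in the #else branch above.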