@@ -19,7 +19,7 @@
 #include "qemu/osdep.h"
 #include "qemu/main-loop.h"
-#include "hw/core/tcg-cpu-ops.h"
+#include "accel/tcg/cpu-ops.h"
 #include "exec/exec-all.h"
 #include "exec/page-protection.h"
 #include "exec/memory.h"
@@ -37,16 +37,16 @@
 #include "exec/helper-proto-common.h"
 #include "qemu/atomic.h"
 #include "qemu/atomic128.h"
-#include "exec/translate-all.h"
+#include "tb-internal.h"
 #include "trace.h"
 #include "tb-hash.h"
+#include "tb-internal.h"
 #include "internal-common.h"
 #include "internal-target.h"
 #ifdef CONFIG_PLUGIN
 #include "qemu/plugin-memory.h"
 #endif
 #include "tcg/tcg-ldst.h"
-#include "tcg/oversized-guest.h"
 
 /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
 /* #define DEBUG_TLB */
@@ -104,26 +104,15 @@ static inline uint64_t tlb_read_idx(const CPUTLBEntry *entry,
 {
     /* Do not rearrange the CPUTLBEntry structure members. */
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_read) !=
-                      MMU_DATA_LOAD * sizeof(uint64_t));
+                      MMU_DATA_LOAD * sizeof(uintptr_t));
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_write) !=
-                      MMU_DATA_STORE * sizeof(uint64_t));
+                      MMU_DATA_STORE * sizeof(uintptr_t));
     QEMU_BUILD_BUG_ON(offsetof(CPUTLBEntry, addr_code) !=
-                      MMU_INST_FETCH * sizeof(uint64_t));
+                      MMU_INST_FETCH * sizeof(uintptr_t));
 
-#if TARGET_LONG_BITS == 32
-    /* Use qatomic_read, in case of addr_write; only care about low bits. */
-    const uint32_t *ptr = (uint32_t *)&entry->addr_idx[access_type];
-    ptr += HOST_BIG_ENDIAN;
-    return qatomic_read(ptr);
-#else
-    const uint64_t *ptr = &entry->addr_idx[access_type];
-# if TCG_OVERSIZED_GUEST
-    return *ptr;
-# else
+    const uintptr_t *ptr = &entry->addr_idx[access_type];
     /* ofs might correspond to .addr_write, so use qatomic_read */
     return qatomic_read(ptr);
-# endif
-#endif
 }
 
 static inline uint64_t tlb_addr_write(const CPUTLBEntry *entry)
@@ -903,16 +892,8 @@ static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
         addr &= TARGET_PAGE_MASK;
         addr += tlb_entry->addend;
         if ((addr - start) < length) {
-#if TARGET_LONG_BITS == 32
-            uint32_t *ptr_write = (uint32_t *)&tlb_entry->addr_write;
-            ptr_write += HOST_BIG_ENDIAN;
-            qatomic_set(ptr_write, *ptr_write | TLB_NOTDIRTY);
-#elif TCG_OVERSIZED_GUEST
-            tlb_entry->addr_write |= TLB_NOTDIRTY;
-#else
             qatomic_set(&tlb_entry->addr_write,
                         tlb_entry->addr_write | TLB_NOTDIRTY);
-#endif
         }
     }
 }
@@ -1199,7 +1180,7 @@ void tlb_set_page_full(CPUState *cpu, int mmu_idx,
 
 void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
                              hwaddr paddr, MemTxAttrs attrs, int prot,
-                             int mmu_idx, uint64_t size)
+                             int mmu_idx, vaddr size)
 {
     CPUTLBEntryFull full = {
         .phys_addr = paddr,
@@ -1214,29 +1195,65 @@ void tlb_set_page_with_attrs(CPUState *cpu, vaddr addr,
 
 void tlb_set_page(CPUState *cpu, vaddr addr,
                   hwaddr paddr, int prot,
-                  int mmu_idx, uint64_t size)
+                  int mmu_idx, vaddr size)
 {
     tlb_set_page_with_attrs(cpu, addr, paddr, MEMTXATTRS_UNSPECIFIED,
                             prot, mmu_idx, size);
 }
 
+/**
+ * tlb_hit_page: return true if page aligned @addr is a hit against the
+ * TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (must be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit_page(uint64_t tlb_addr, vaddr addr)
+{
+    return addr == (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK));
+}
+
+/**
+ * tlb_hit: return true if @addr is a hit against the TLB entry @tlb_addr
+ *
+ * @addr: virtual address to test (need not be page aligned)
+ * @tlb_addr: TLB entry address (a CPUTLBEntry addr_read/write/code value)
+ */
+static inline bool tlb_hit(uint64_t tlb_addr, vaddr addr)
+{
+    return tlb_hit_page(tlb_addr, addr & TARGET_PAGE_MASK);
+}
+
 /*
- * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
- * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
- * be discarded and looked up again (e.g. via tlb_entry()).
+ * Note: tlb_fill_align() can trigger a resize of the TLB.
+ * This means that all of the caller's prior references to the TLB table
+ * (e.g. CPUTLBEntry pointers) must be discarded and looked up again
+ * (e.g. via tlb_entry()).
  */
-static void tlb_fill(CPUState *cpu, vaddr addr, int size,
-                     MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
+static bool tlb_fill_align(CPUState *cpu, vaddr addr, MMUAccessType type,
+                           int mmu_idx, MemOp memop, int size,
+                           bool probe, uintptr_t ra)
 {
-    bool ok;
+    const TCGCPUOps *ops = cpu->cc->tcg_ops;
+    CPUTLBEntryFull full;
 
-    /*
-     * This is not a probe, so only valid return is success; failure
-     * should result in exception + longjmp to the cpu loop.
-     */
-    ok = cpu->cc->tcg_ops->tlb_fill(cpu, addr, size,
-                                    access_type, mmu_idx, false, retaddr);
-    assert(ok);
+    if (ops->tlb_fill_align) {
+        if (ops->tlb_fill_align(cpu, &full, addr, type, mmu_idx,
+                                memop, size, probe, ra)) {
+            tlb_set_page_full(cpu, mmu_idx, addr, &full);
+            return true;
+        }
+    } else {
+        /* Legacy behaviour is alignment before paging. */
+        if (addr & ((1u << memop_alignment_bits(memop)) - 1)) {
+            ops->do_unaligned_access(cpu, addr, type, mmu_idx, ra);
+        }
+        if (ops->tlb_fill(cpu, addr, size, type, mmu_idx, probe, ra)) {
+            return true;
+        }
+    }
+    assert(probe);
+    return false;
 }
 
 static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
|
|
|
|
|
|
if (!tlb_hit_page(tlb_addr, page_addr)) {
|
|
if (!tlb_hit_page(tlb_addr, page_addr)) {
|
|
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
|
|
if (!victim_tlb_hit(cpu, mmu_idx, index, access_type, page_addr)) {
|
|
- if (!cpu->cc->tcg_ops->tlb_fill(cpu, addr, fault_size, access_type,
|
|
|
|
- mmu_idx, nonfault, retaddr)) {
|
|
|
|
|
|
+ if (!tlb_fill_align(cpu, addr, access_type, mmu_idx,
|
|
|
|
+ 0, fault_size, nonfault, retaddr)) {
|
|
/* Non-faulting page table read failed. */
|
|
/* Non-faulting page table read failed. */
|
|
*phost = NULL;
|
|
*phost = NULL;
|
|
*pfull = NULL;
|
|
*pfull = NULL;
|
|
return TLB_INVALID_MASK;
|
|
return TLB_INVALID_MASK;
|
|
}
|
|
}
|
|
|
|
|
|
- /* TLB resize via tlb_fill may have moved the entry. */
|
|
|
|
|
|
+ /* TLB resize via tlb_fill_align may have moved the entry. */
|
|
index = tlb_index(cpu, mmu_idx, addr);
|
|
index = tlb_index(cpu, mmu_idx, addr);
|
|
entry = tlb_entry(cpu, mmu_idx, addr);
|
|
entry = tlb_entry(cpu, mmu_idx, addr);
|
|
|
|
|
|
/*
|
|
/*
|
|
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
|
|
* With PAGE_WRITE_INV, we set TLB_INVALID_MASK immediately,
|
|
- * to force the next access through tlb_fill. We've just
|
|
|
|
- * called tlb_fill, so we know that this entry *is* valid.
|
|
|
|
|
|
+ * to force the next access through tlb_fill_align. We've just
|
|
|
|
+ * called tlb_fill_align, so we know that this entry *is* valid.
|
|
*/
|
|
*/
|
|
flags &= ~TLB_INVALID_MASK;
|
|
flags &= ~TLB_INVALID_MASK;
|
|
}
|
|
}
|
|
@@ -1491,7 +1508,7 @@ void *probe_access(CPUArchState *env, vaddr addr, int size,
     return host;
 }
 
-void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
+void *tlb_vaddr_to_host(CPUArchState *env, vaddr addr,
                         MMUAccessType access_type, int mmu_idx)
 {
     CPUTLBEntryFull *full;
@@ -1607,16 +1624,17 @@ typedef struct MMULookupLocals {
  * mmu_lookup1: translate one page
  * @cpu: generic cpu state
  * @data: lookup parameters
+ * @memop: memory operation for the access, or 0
  * @mmu_idx: virtual address context
  * @access_type: load/store/code
  * @ra: return address into tcg generated code, or 0
  *
  * Resolve the translation for the one page at @data.addr, filling in
  * the rest of @data with the results. If the translation fails,
- * tlb_fill will longjmp out. Return true if the softmmu tlb for
+ * tlb_fill_align will longjmp out. Return true if the softmmu tlb for
  * @mmu_idx may have resized.
  */
-static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
+static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data, MemOp memop,
                         int mmu_idx, MMUAccessType access_type, uintptr_t ra)
 {
     vaddr addr = data->addr;
@@ -1631,7 +1649,8 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, access_type,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, data->size, access_type, mmu_idx, ra);
+            tlb_fill_align(cpu, addr, access_type, mmu_idx,
+                           memop, data->size, false, ra);
             maybe_resized = true;
             index = tlb_index(cpu, mmu_idx, addr);
             entry = tlb_entry(cpu, mmu_idx, addr);
@@ -1643,6 +1662,25 @@ static bool mmu_lookup1(CPUState *cpu, MMULookupPageData *data,
     flags = tlb_addr & (TLB_FLAGS_MASK & ~TLB_FORCE_SLOW);
     flags |= full->slow_flags[access_type];
 
+    if (likely(!maybe_resized)) {
+        /* Alignment has not been checked by tlb_fill_align. */
+        int a_bits = memop_alignment_bits(memop);
+
+        /*
+         * This alignment check differs from the one above, in that this is
+         * based on the atomicity of the operation. The intended use case is
+         * the ARM memory type field of each PTE, where access to pages with
+         * Device memory type require alignment.
+         */
+        if (unlikely(flags & TLB_CHECK_ALIGNED)) {
+            int at_bits = memop_atomicity_bits(memop);
+            a_bits = MAX(a_bits, at_bits);
+        }
+        if (unlikely(addr & ((1 << a_bits) - 1))) {
+            cpu_unaligned_access(cpu, addr, access_type, mmu_idx, ra);
+        }
+    }
+
     data->full = full;
     data->flags = flags;
     /* Compute haddr speculatively; depending on flags it might be invalid. */
@@ -1699,7 +1737,6 @@ static void mmu_watch_or_dirty(CPUState *cpu, MMULookupPageData *data,
 static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
                        uintptr_t ra, MMUAccessType type, MMULookupLocals *l)
 {
-    unsigned a_bits;
     bool crosspage;
     int flags;
 
@@ -1708,12 +1745,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     tcg_debug_assert(l->mmu_idx < NB_MMU_MODES);
 
-    /* Handle CPU specific unaligned behaviour */
-    a_bits = get_alignment_bits(l->memop);
-    if (addr & ((1 << a_bits) - 1)) {
-        cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
-    }
-
     l->page[0].addr = addr;
     l->page[0].size = memop_size(l->memop);
     l->page[1].addr = (addr + l->page[0].size - 1) & TARGET_PAGE_MASK;
@@ -1721,7 +1752,7 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     crosspage = (addr ^ l->page[1].addr) & TARGET_PAGE_MASK;
 
     if (likely(!crosspage)) {
-        mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
+        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
 
         flags = l->page[0].flags;
         if (unlikely(flags & (TLB_WATCHPOINT | TLB_NOTDIRTY))) {
@@ -1740,8 +1771,8 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
          * Lookup both pages, recognizing exceptions from either. If the
          * second lookup potentially resized, refresh first CPUTLBEntryFull.
          */
-        mmu_lookup1(cpu, &l->page[0], l->mmu_idx, type, ra);
-        if (mmu_lookup1(cpu, &l->page[1], l->mmu_idx, type, ra)) {
+        mmu_lookup1(cpu, &l->page[0], l->memop, l->mmu_idx, type, ra);
+        if (mmu_lookup1(cpu, &l->page[1], 0, l->mmu_idx, type, ra)) {
             uintptr_t index = tlb_index(cpu, l->mmu_idx, addr);
             l->page[0].full = &cpu->neg.tlb.d[l->mmu_idx].fulltlb[index];
         }
@@ -1760,31 +1791,6 @@ static bool mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
         tcg_debug_assert((flags & TLB_BSWAP) == 0);
     }
 
-    /*
-     * This alignment check differs from the one above, in that this is
-     * based on the atomicity of the operation. The intended use case is
-     * the ARM memory type field of each PTE, where access to pages with
-     * Device memory type require alignment.
-     */
-    if (unlikely(flags & TLB_CHECK_ALIGNED)) {
-        MemOp size = l->memop & MO_SIZE;
-
-        switch (l->memop & MO_ATOM_MASK) {
-        case MO_ATOM_NONE:
-            size = MO_8;
-            break;
-        case MO_ATOM_IFALIGN_PAIR:
-        case MO_ATOM_WITHIN16_PAIR:
-            size = size ? size - 1 : 0;
-            break;
-        default:
-            break;
-        }
-        if (addr & ((1 << size) - 1)) {
-            cpu_unaligned_access(cpu, addr, type, l->mmu_idx, ra);
-        }
-    }
-
     return crosspage;
 }
 
@@ -1797,34 +1803,18 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
 {
     uintptr_t mmu_idx = get_mmuidx(oi);
     MemOp mop = get_memop(oi);
-    int a_bits = get_alignment_bits(mop);
     uintptr_t index;
     CPUTLBEntry *tlbe;
     vaddr tlb_addr;
     void *hostaddr;
     CPUTLBEntryFull *full;
+    bool did_tlb_fill = false;
 
     tcg_debug_assert(mmu_idx < NB_MMU_MODES);
 
     /* Adjust the given return address. */
    retaddr -= GETPC_ADJ;
 
-    /* Enforce guest required alignment. */
-    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
-        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
-        cpu_unaligned_access(cpu, addr, MMU_DATA_STORE,
-                             mmu_idx, retaddr);
-    }
-
-    /* Enforce qemu required alignment. */
-    if (unlikely(addr & (size - 1))) {
-        /* We get here if guest alignment was not requested,
-           or was not enforced by cpu_unaligned_access above.
-           We might widen the access and emulate, but for now
-           mark an exception and exit the cpu loop. */
-        goto stop_the_world;
-    }
-
     index = tlb_index(cpu, mmu_idx, addr);
     tlbe = tlb_entry(cpu, mmu_idx, addr);
 
@@ -1833,8 +1823,9 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
     if (!tlb_hit(tlb_addr, addr)) {
         if (!victim_tlb_hit(cpu, mmu_idx, index, MMU_DATA_STORE,
                             addr & TARGET_PAGE_MASK)) {
-            tlb_fill(cpu, addr, size,
-                     MMU_DATA_STORE, mmu_idx, retaddr);
+            tlb_fill_align(cpu, addr, MMU_DATA_STORE, mmu_idx,
+                           mop, size, false, retaddr);
+            did_tlb_fill = true;
             index = tlb_index(cpu, mmu_idx, addr);
             tlbe = tlb_entry(cpu, mmu_idx, addr);
         }
@@ -1848,15 +1839,32 @@ static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
      * but addr_read will only be -1 if PAGE_READ was unset.
      */
     if (unlikely(tlbe->addr_read == -1)) {
-        tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr);
+        tlb_fill_align(cpu, addr, MMU_DATA_LOAD, mmu_idx,
+                       0, size, false, retaddr);
         /*
          * Since we don't support reads and writes to different
          * addresses, and we do have the proper page loaded for
-         * write, this shouldn't ever return.  But just in case,
-         * handle via stop-the-world.
+         * write, this shouldn't ever return.
+         */
+        g_assert_not_reached();
+    }
+
+    /* Enforce guest required alignment, if not handled by tlb_fill_align. */
+    if (!did_tlb_fill && (addr & ((1 << memop_alignment_bits(mop)) - 1))) {
+        cpu_unaligned_access(cpu, addr, MMU_DATA_STORE, mmu_idx, retaddr);
+    }
+
+    /* Enforce qemu required alignment. */
+    if (unlikely(addr & (size - 1))) {
+        /*
+         * We get here if guest alignment was not requested, or was not
+         * enforced by cpu_unaligned_access or tlb_fill_align above.
+         * We might widen the access and emulate, but for now
+         * mark an exception and exit the cpu loop.
          */
         goto stop_the_world;
     }
+
     /* Collect tlb flags for read. */
     tlb_addr |= tlbe->addr_read;
 