@@ -24,17 +24,27 @@
 #include "qemu/bitops.h"
 #include "qemu/rcu.h"
 #include "exec/cpu_ldst.h"
+#include "qemu/main-loop.h"
 #include "exec/translate-all.h"
+#include "exec/page-protection.h"
 #include "exec/helper-proto.h"
 #include "qemu/atomic128.h"
 #include "trace/trace-root.h"
 #include "tcg/tcg-ldst.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"

 __thread uintptr_t helper_retaddr;

 //#define DEBUG_SIGNAL

+void cpu_interrupt(CPUState *cpu, int mask)
+{
+    g_assert(bql_locked());
+    cpu->interrupt_request |= mask;
+    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
+}
+
 /*
  * Adjust the pc to pass to cpu_restore_state; return the memop type.
  */
@@ -144,7 +154,7 @@ typedef struct PageFlagsNode {

 static IntervalTreeRoot pageflags_root;

-static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
+static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
 {
     IntervalTreeNode *n;

@@ -153,7 +163,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
 }

 static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
-                                     target_long last)
+                                     target_ulong last)
 {
     IntervalTreeNode *n;

@@ -520,19 +530,19 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
     }
 }

-int page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(target_ulong start, target_ulong len, int flags)
 {
     target_ulong last;
     int locked; /* tri-state: =0: unlocked, +1: global, -1: local */
-    int ret;
+    bool ret;

     if (len == 0) {
-        return 0; /* trivial length */
+        return true; /* trivial length */
     }

     last = start + len - 1;
     if (last < start) {
-        return -1; /* wrap around */
+        return false; /* wrap around */
     }

     locked = have_mmap_lock();
@@ -551,33 +561,33 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
                 p = pageflags_find(start, last);
             }
             if (!p) {
-                ret = -1; /* entire region invalid */
+                ret = false; /* entire region invalid */
                 break;
             }
         }
         if (start < p->itree.start) {
-            ret = -1; /* initial bytes invalid */
+            ret = false; /* initial bytes invalid */
            break;
         }

         missing = flags & ~p->flags;
-        if (missing & PAGE_READ) {
-            ret = -1; /* page not readable */
+        if (missing & ~PAGE_WRITE) {
+            ret = false; /* page doesn't match */
             break;
         }
         if (missing & PAGE_WRITE) {
             if (!(p->flags & PAGE_WRITE_ORG)) {
-                ret = -1; /* page not writable */
+                ret = false; /* page not writable */
                 break;
             }
             /* Asking about writable, but has been protected: undo. */
             if (!page_unprotect(start, 0)) {
-                ret = -1;
+                ret = false;
                 break;
             }
             /* TODO: page_unprotect should take a range, not a single page. */
             if (last - start < TARGET_PAGE_SIZE) {
-                ret = 0; /* ok */
+                ret = true; /* ok */
                 break;
             }
             start += TARGET_PAGE_SIZE;
@@ -585,7 +595,7 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
         }

         if (last <= p->itree.last) {
-            ret = 0; /* ok */
+            ret = true; /* ok */
             break;
         }
         start = p->itree.last + 1;
@@ -598,20 +608,69 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
     return ret;
 }

+bool page_check_range_empty(target_ulong start, target_ulong last)
+{
+    assert(last >= start);
+    assert_memory_lock();
+    return pageflags_find(start, last) == NULL;
+}
+
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+                                   target_ulong len, target_ulong align)
+{
+    target_ulong len_m1, align_m1;
+
+    assert(min <= max);
+    assert(max <= GUEST_ADDR_MAX);
+    assert(len != 0);
+    assert(is_power_of_2(align));
+    assert_memory_lock();
+
+    len_m1 = len - 1;
+    align_m1 = align - 1;
+
+    /* Iteratively narrow the search region. */
+    while (1) {
+        PageFlagsNode *p;
+
+        /* Align min and double-check there's enough space remaining. */
+        min = (min + align_m1) & ~align_m1;
+        if (min > max) {
+            return -1;
+        }
+        if (len_m1 > max - min) {
+            return -1;
+        }
+
+        p = pageflags_find(min, min + len_m1);
+        if (p == NULL) {
+            /* Found! */
+            return min;
+        }
+        if (max <= p->itree.last) {
+            /* Existing allocation fills the remainder of the search region. */
+            return -1;
+        }
+        /* Skip across existing allocation. */
+        min = p->itree.last + 1;
+    }
+}
+
 void page_protect(tb_page_addr_t address)
 {
     PageFlagsNode *p;
     target_ulong start, last;
+    int host_page_size = qemu_real_host_page_size();
     int prot;

     assert_memory_lock();

-    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
+    if (host_page_size <= TARGET_PAGE_SIZE) {
         start = address & TARGET_PAGE_MASK;
         last = start + TARGET_PAGE_SIZE - 1;
     } else {
-        start = address & qemu_host_page_mask;
-        last = start + qemu_host_page_size - 1;
+        start = address & -host_page_size;
+        last = start + host_page_size - 1;
     }

     p = pageflags_find(start, last);
@@ -622,7 +681,7 @@ void page_protect(tb_page_addr_t address)

     if (unlikely(p->itree.last < last)) {
         /* More than one protection region covers the one host page. */
-        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
+        assert(TARGET_PAGE_SIZE < host_page_size);
         while ((p = pageflags_next(p, start, last)) != NULL) {
             prot |= p->flags;
         }
@@ -630,7 +689,7 @@ void page_protect(tb_page_addr_t address)

     if (prot & PAGE_WRITE) {
         pageflags_set_clear(start, last, 0, PAGE_WRITE);
-        mprotect(g2h_untagged(start), qemu_host_page_size,
+        mprotect(g2h_untagged(start), last - start + 1,
                  prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
     }
 }
@@ -676,18 +735,19 @@ int page_unprotect(target_ulong address, uintptr_t pc)
         }
 #endif
     } else {
+        int host_page_size = qemu_real_host_page_size();
         target_ulong start, len, i;
         int prot;

-        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
+        if (host_page_size <= TARGET_PAGE_SIZE) {
             start = address & TARGET_PAGE_MASK;
             len = TARGET_PAGE_SIZE;
             prot = p->flags | PAGE_WRITE;
             pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
             current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
         } else {
-            start = address & qemu_host_page_mask;
-            len = qemu_host_page_size;
+            start = address & -host_page_size;
+            len = host_page_size;
             prot = 0;

             for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
@@ -713,7 +773,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
         if (prot & PAGE_EXEC) {
             prot = (prot & ~PAGE_EXEC) | PAGE_READ;
         }
-        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
+        mprotect((void *)g2h_untagged(start), len, prot & PAGE_RWX);
     }
     mmap_unlock();

@@ -721,7 +781,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
     return current_tb_invalidated ? 2 : 1;
 }

-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
                                  int fault_size, MMUAccessType access_type,
                                  bool nonfault, uintptr_t ra)
 {
@@ -745,6 +805,10 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     if (guest_addr_valid_untagged(addr)) {
         int page_flags = page_get_flags(addr);
         if (page_flags & acc_flag) {
+            if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
+                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
+                return TLB_MMIO;
+            }
             return 0; /* success */
         }
         maperr = !(page_flags & PAGE_VALID);
@@ -759,7 +823,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
 }

-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t ra)
 {
@@ -767,23 +831,23 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,

     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
     flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
-    *phost = flags ? NULL : g2h(env_cpu(env), addr);
+    *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
     return flags;
 }

-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t ra)
 {
     int flags;

     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
     flags = probe_access_internal(env, addr, size, access_type, false, ra);
-    g_assert(flags == 0);
+    g_assert((flags & ~TLB_MMIO) == 0);

     return size ? g2h(env_cpu(env), addr) : NULL;
 }

-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                         void **hostp)
 {
     int flags;
@@ -809,7 +873,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
 typedef struct TargetPageDataNode {
     struct rcu_head rcu;
     IntervalTreeNode itree;
-    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
+    char data[] __attribute__((aligned));
 } TargetPageDataNode;

 static IntervalTreeRoot targetdata_root;
@@ -847,7 +911,8 @@ void page_reset_target_data(target_ulong start, target_ulong last)
         n_last = MIN(last, n->last);
         p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;

-        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
+        memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
+               p_len * TARGET_PAGE_DATA_SIZE);
     }
 }

@@ -855,7 +920,7 @@ void *page_get_target_data(target_ulong address)
 {
     IntervalTreeNode *n;
     TargetPageDataNode *t;
-    target_ulong page, region;
+    target_ulong page, region, p_ofs;

     page = address & TARGET_PAGE_MASK;
     region = address & TBD_MASK;
@@ -871,7 +936,8 @@ void *page_get_target_data(target_ulong address)
         mmap_lock();
         n = interval_tree_iter_first(&targetdata_root, page, page);
         if (!n) {
-            t = g_new0(TargetPageDataNode, 1);
+            t = g_malloc0(sizeof(TargetPageDataNode)
+                          + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
             n = &t->itree;
             n->start = region;
             n->last = region | ~TBD_MASK;
@@ -881,302 +947,192 @@ void *page_get_target_data(target_ulong address)
     }

     t = container_of(n, TargetPageDataNode, itree);
-    return t->data[(page - region) >> TARGET_PAGE_BITS];
+    p_ofs = (page - region) >> TARGET_PAGE_BITS;
+    return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
 }
 #else
 void page_reset_target_data(target_ulong start, target_ulong last) { }
 #endif /* TARGET_PAGE_DATA_SIZE */

-/* The softmmu versions of these helpers are in cputlb.c. */
-
-/*
- * Verify that we have passed the correct MemOp to the correct function.
- *
- * We could present one function to target code, and dispatch based on
- * the MemOp, but so far we have worked hard to avoid an indirect function
- * call along the memory path.
- */
-static void validate_memop(MemOpIdx oi, MemOp expected)
-{
-#ifdef CONFIG_DEBUG_TCG
-    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
-    assert(have == expected);
-#endif
-}
+/* The system-mode versions of these helpers are in cputlb.c. */

-void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
+static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
+                            MemOp mop, uintptr_t ra, MMUAccessType type)
 {
-    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
-}
-
-void helper_unaligned_st(CPUArchState *env, target_ulong addr)
-{
-    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
-}
-
-static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
-                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
-{
-    MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     void *ret;

     /* Enforce guest required alignment. */
     if (unlikely(addr & ((1 << a_bits) - 1))) {
-        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
+        cpu_loop_exit_sigbus(cpu, addr, type, ra);
     }

-    ret = g2h(env_cpu(env), addr);
+    ret = g2h(cpu, addr);
     set_helper_retaddr(ra);
     return ret;
 }

-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
-                    MemOpIdx oi, uintptr_t ra)
+#include "ldst_atomicity.c.inc"
+
+static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                          uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
     uint8_t ret;

-    validate_memop(oi, MO_UB);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
 }

-uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
+static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
     uint16_t ret;
+    MemOp mop = get_memop(oi);

-    validate_memop(oi, MO_BEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = lduw_be_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint32_t ret;
-
-    validate_memop(oi, MO_BEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldl_be_p(haddr);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_2(cpu, ra, haddr, mop);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint64_t ret;

-    validate_memop(oi, MO_BEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldq_be_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint16_t ret;
-
-    validate_memop(oi, MO_LEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = lduw_le_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    if (mop & MO_BSWAP) {
+        ret = bswap16(ret);
+    }
     return ret;
 }

-uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
+static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
     uint32_t ret;
+    MemOp mop = get_memop(oi);

-    validate_memop(oi, MO_LEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldl_le_p(haddr);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_4(cpu, ra, haddr, mop);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}

-uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint64_t ret;
-
-    validate_memop(oi, MO_LEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldq_le_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    if (mop & MO_BSWAP) {
+        ret = bswap32(ret);
+    }
     return ret;
 }

-Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
-                       MemOpIdx oi, uintptr_t ra)
+static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
     void *haddr;
-    Int128 ret;
+    uint64_t ret;
+    MemOp mop = get_memop(oi);

-    validate_memop(oi, MO_128 | MO_BE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    memcpy(&ret, haddr, 16);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_8(cpu, ra, haddr, mop);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

-    if (!HOST_BIG_ENDIAN) {
-        ret = bswap128(ret);
+    if (mop & MO_BSWAP) {
+        ret = bswap64(ret);
     }
     return ret;
 }

-Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
-                       MemOpIdx oi, uintptr_t ra)
+static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
     Int128 ret;
+    MemOp mop = get_memop(oi);

-    validate_memop(oi, MO_128 | MO_LE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    memcpy(&ret, haddr, 16);
+    tcg_debug_assert((mop & MO_SIZE) == MO_128);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
+    ret = load_atom_16(cpu, ra, haddr, mop);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);

-    if (HOST_BIG_ENDIAN) {
+    if (mop & MO_BSWAP) {
         ret = bswap128(ret);
     }
     return ret;
 }

-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
-                 MemOpIdx oi, uintptr_t ra)
+static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;

-    validate_memop(oi, MO_UB);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
     stb_p(haddr, val);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-
-    validate_memop(oi, MO_BEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stw_be_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }

-void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t ra)
+static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOp mop = get_memop(oi);

-    validate_memop(oi, MO_BEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stl_be_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

-void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-
-    validate_memop(oi, MO_BEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stq_be_p(haddr, val);
+    if (mop & MO_BSWAP) {
+        val = bswap16(val);
+    }
+    store_atom_2(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }

-void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
-                    MemOpIdx oi, uintptr_t ra)
+static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOp mop = get_memop(oi);

-    validate_memop(oi, MO_LEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stw_le_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

-    validate_memop(oi, MO_LEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stl_le_p(haddr, val);
+    if (mop & MO_BSWAP) {
+        val = bswap32(val);
+    }
+    store_atom_4(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }

-void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
-                    MemOpIdx oi, uintptr_t ra)
+static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOp mop = get_memop(oi);

-    validate_memop(oi, MO_LEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stq_le_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);

-void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
-                     Int128 val, MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-
-    validate_memop(oi, MO_128 | MO_BE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    if (!HOST_BIG_ENDIAN) {
-        val = bswap128(val);
+    if (mop & MO_BSWAP) {
+        val = bswap64(val);
     }
-    memcpy(haddr, &val, 16);
+    store_atom_8(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }

-void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
-                     Int128 val, MemOpIdx oi, uintptr_t ra)
+static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
+                        MemOpIdx oi, uintptr_t ra)
 {
     void *haddr;
+    MemOpIdx mop = get_memop(oi);

-    validate_memop(oi, MO_128 | MO_LE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    if (HOST_BIG_ENDIAN) {
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
+
+    if (mop & MO_BSWAP) {
         val = bswap128(val);
     }
-    memcpy(haddr, &val, 16);
+    store_atom_16(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }

 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
@@ -1219,16 +1175,70 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
     return ret;
 }

+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+                         MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint8_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
+    ret = ldub_p(haddr);
+    clear_helper_retaddr();
+    return ret;
+}
+
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint16_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
+    ret = lduw_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap16(ret);
+    }
+    return ret;
+}
+
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint32_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
+    ret = ldl_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap32(ret);
+    }
+    return ret;
+}
+
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint64_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldq_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap64(ret);
+    }
+    return ret;
+}
+
 #include "ldst_common.c.inc"

 /*
  * Do not allow unaligned operations to proceed. Return the host address.
- *
- * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
  */
-static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               MemOpIdx oi, int size, int prot,
-                               uintptr_t retaddr)
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                               int size, uintptr_t retaddr)
 {
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
@@ -1236,16 +1246,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,

     /* Enforce guest required alignment. */
     if (unlikely(addr & ((1 << a_bits) - 1))) {
-        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
-        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
+        cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
     }

     /* Enforce qemu required alignment. */
     if (unlikely(addr & (size - 1))) {
-        cpu_loop_exit_atomic(env_cpu(env), retaddr);
+        cpu_loop_exit_atomic(cpu, retaddr);
     }

-    ret = g2h(env_cpu(env), addr);
+    ret = g2h(cpu, addr);
     set_helper_retaddr(retaddr);
     return ret;
 }
@@ -1275,7 +1284,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 #include "atomic_template.h"
 #endif

-#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
+#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
 #define DATA_SIZE 16
 #include "atomic_template.h"
 #endif