@@ -2192,6 +2192,64 @@ uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
     return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
 }
 
+Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+    uint64_t h, l;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    h = helper_be_ldq_mmu(env, addr, new_oi, ra);
+    l = helper_be_ldq_mmu(env, addr + 8, new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return int128_make128(l, h);
+}
+
+Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
+                       MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+    uint64_t h, l;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_LOAD,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    l = helper_le_ldq_mmu(env, addr, new_oi, ra);
+    h = helper_le_ldq_mmu(env, addr + 8, new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    return int128_make128(l, h);
+}
+
 /*
  * Store Helpers
  */
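The 16-byte loads above decompose into two 8-byte helper calls: alignment is
checked once up front against the original MemOp, a replacement MemOpIdx with
MO_64 | MO_UNALN is used for each half so the halves are not re-checked, and a
single plugin callback reports the full 16-byte access with the original oi.
A minimal caller sketch, assuming a target wants an aligned big-endian load;
the function name example_ld16 and the MO_ALIGN choice are illustrative, not
part of this patch:

    /* Hypothetical caller: load an aligned big-endian 16-byte value. */
    static Int128 example_ld16(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
    {
        /* Pack size, endianness, alignment and mmu index into one MemOpIdx. */
        MemOpIdx oi = make_memop_idx(MO_128 | MO_BE | MO_ALIGN, mmu_idx);
        return cpu_ld16_be_mmu(env, addr, oi, ra);
    }
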
@@ -2546,6 +2604,60 @@ void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
     cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
 }
 
+void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_BE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    helper_be_stq_mmu(env, addr, int128_gethi(val), new_oi, ra);
+    helper_be_stq_mmu(env, addr + 8, int128_getlo(val), new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
+void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    MemOp mop = get_memop(oi);
+    int mmu_idx = get_mmuidx(oi);
+    MemOpIdx new_oi;
+    unsigned a_bits;
+
+    tcg_debug_assert((mop & (MO_BSWAP|MO_SSIZE)) == (MO_LE|MO_128));
+    a_bits = get_alignment_bits(mop);
+
+    /* Handle CPU specific unaligned behaviour */
+    if (addr & ((1 << a_bits) - 1)) {
+        cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
+                             mmu_idx, ra);
+    }
+
+    /* Construct an unaligned 64-bit replacement MemOpIdx. */
+    mop = (mop & ~(MO_SIZE | MO_AMASK)) | MO_64 | MO_UNALN;
+    new_oi = make_memop_idx(mop, mmu_idx);
+
+    helper_le_stq_mmu(env, addr, int128_getlo(val), new_oi, ra);
+    helper_le_stq_mmu(env, addr + 8, int128_gethi(val), new_oi, ra);
+
+    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+}
+
 #include "ldst_common.c.inc"
 
 /*
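
As with the loads, the 16-byte stores check alignment once against the
original MemOp, then issue two 8-byte stores through the unaligned 64-bit
replacement MemOpIdx, and report one 16-byte plugin event. Note the halves
mirror the endianness: big-endian writes the high word at addr, little-endian
writes the low word there. A matching store sketch under the same assumptions
(example_st16 is hypothetical):

    /* Hypothetical caller: store an aligned little-endian 16-byte value. */
    static void example_st16(CPUArchState *env, abi_ptr addr, Int128 val,
                             int mmu_idx, uintptr_t ra)
    {
        MemOpIdx oi = make_memop_idx(MO_128 | MO_LE | MO_ALIGN, mmu_idx);
        cpu_st16_le_mmu(env, addr, val, oi, ra);
    }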