@@ -16,10 +16,8 @@
 
 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool s1_is_el0, hwaddr *phys_ptr,
-                               MemTxAttrs *txattrs, int *prot,
-                               target_ulong *page_size_ptr,
-                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+                               bool s1_is_el0, GetPhysAddrResult *result,
+                               ARMMMUFaultInfo *fi)
     __attribute__((nonnull));
 
 /* This mapping is common between ID_AA64MMFR0.PARANGE and TCR_ELx.{I}PS. */
@@ -204,18 +202,13 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
 {
     if (arm_mmu_idx_is_stage1_of_2(mmu_idx) &&
         !regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
-        target_ulong s2size;
-        hwaddr s2pa;
-        int s2prot;
-        int ret;
         ARMMMUIdx s2_mmu_idx = *is_secure ? ARMMMUIdx_Stage2_S
                                           : ARMMMUIdx_Stage2;
-        ARMCacheAttrs cacheattrs = {};
-        MemTxAttrs txattrs = {};
+        GetPhysAddrResult s2 = {};
+        int ret;
 
         ret = get_phys_addr_lpae(env, addr, MMU_DATA_LOAD, s2_mmu_idx, false,
-                                 &s2pa, &txattrs, &s2prot, &s2size, fi,
-                                 &cacheattrs);
+                                 &s2, fi);
         if (ret) {
             assert(fi->type != ARMFault_None);
             fi->s2addr = addr;
@@ -225,7 +218,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
             return ~0;
         }
         if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
-            ptw_attrs_are_device(env, cacheattrs)) {
+            ptw_attrs_are_device(env, s2.cacheattrs)) {
             /*
              * PTW set and S1 walk touched S2 Device memory:
              * generate Permission fault.
@@ -249,7 +242,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
             assert(!*is_secure);
         }
 
-        addr = s2pa;
+        addr = s2.phys;
     }
     return addr;
 }
@@ -421,8 +414,7 @@ static int simple_ap_to_rw_prot(CPUARMState *env, ARMMMUIdx mmu_idx, int ap)
 
 static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             hwaddr *phys_ptr, int *prot,
-                             target_ulong *page_size,
+                             bool is_secure, GetPhysAddrResult *result,
                              ARMMMUFaultInfo *fi)
 {
     int level = 1;
@@ -442,8 +434,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
         fi->type = ARMFault_Translation;
         goto do_fault;
     }
-    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
-                       mmu_idx, fi);
+    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
@@ -471,7 +462,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
         /* 1Mb section. */
         phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
         ap = (desc >> 10) & 3;
-        *page_size = 1024 * 1024;
+        result->page_size = 1024 * 1024;
     } else {
         /* Lookup l2 entry. */
         if (type == 1) {
@@ -481,8 +472,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
             /* Fine pagetable. */
             table = (desc & 0xfffff000) | ((address >> 8) & 0xffc);
         }
-        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
-                           mmu_idx, fi);
+        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
         if (fi->type != ARMFault_None) {
             goto do_fault;
         }
@@ -493,12 +483,12 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
         case 1: /* 64k page. */
             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
             ap = (desc >> (4 + ((address >> 13) & 6))) & 3;
-            *page_size = 0x10000;
+            result->page_size = 0x10000;
             break;
         case 2: /* 4k page. */
             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
-            *page_size = 0x1000;
+            result->page_size = 0x1000;
             break;
         case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
             if (type == 1) {
@@ -506,7 +496,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                 if (arm_feature(env, ARM_FEATURE_XSCALE)
                     || arm_feature(env, ARM_FEATURE_V6)) {
                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
-                    *page_size = 0x1000;
+                    result->page_size = 0x1000;
                 } else {
                     /*
                      * UNPREDICTABLE in ARMv5; we choose to take a
@@ -517,7 +507,7 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
                 }
             } else {
                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
-                *page_size = 0x400;
+                result->page_size = 0x400;
             }
             ap = (desc >> 4) & 3;
             break;
@@ -526,14 +516,14 @@ static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
             g_assert_not_reached();
         }
     }
-    *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
-    *prot |= *prot ? PAGE_EXEC : 0;
-    if (!(*prot & (1 << access_type))) {
+    result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+    result->prot |= result->prot ? PAGE_EXEC : 0;
+    if (!(result->prot & (1 << access_type))) {
         /* Access permission fault. */
         fi->type = ARMFault_Permission;
         goto do_fault;
     }
-    *phys_ptr = phys_addr;
+    result->phys = phys_addr;
     return false;
 do_fault:
     fi->domain = domain;
@@ -543,8 +533,8 @@ do_fault:
 
 static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                              MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                             target_ulong *page_size, ARMMMUFaultInfo *fi)
+                             bool is_secure, GetPhysAddrResult *result,
+                             ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
     int level = 1;
@@ -567,8 +557,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         fi->type = ARMFault_Translation;
         goto do_fault;
     }
-    desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
-                       mmu_idx, fi);
+    desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
     if (fi->type != ARMFault_None) {
         goto do_fault;
     }
@@ -604,11 +593,11 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
             phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
             phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
-            *page_size = 0x1000000;
+            result->page_size = 0x1000000;
         } else {
             /* Section. */
             phys_addr = (desc & 0xfff00000) | (address & 0x000fffff);
-            *page_size = 0x100000;
+            result->page_size = 0x100000;
         }
         ap = ((desc >> 10) & 3) | ((desc >> 13) & 4);
         xn = desc & (1 << 4);
@@ -621,8 +610,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         ns = extract32(desc, 3, 1);
         /* Lookup l2 entry. */
         table = (desc & 0xfffffc00) | ((address >> 10) & 0x3fc);
-        desc = arm_ldl_ptw(env, table, regime_is_secure(env, mmu_idx),
-                           mmu_idx, fi);
+        desc = arm_ldl_ptw(env, table, is_secure, mmu_idx, fi);
         if (fi->type != ARMFault_None) {
             goto do_fault;
         }
@@ -634,12 +622,12 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         case 1: /* 64k page. */
             phys_addr = (desc & 0xffff0000) | (address & 0xffff);
             xn = desc & (1 << 15);
-            *page_size = 0x10000;
+            result->page_size = 0x10000;
             break;
         case 2: case 3: /* 4k page. */
             phys_addr = (desc & 0xfffff000) | (address & 0xfff);
             xn = desc & 1;
-            *page_size = 0x1000;
+            result->page_size = 0x1000;
             break;
         default:
             /* Never happens, but compiler isn't smart enough to tell. */
@@ -647,7 +635,7 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
         }
     }
     if (domain_prot == 3) {
-        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
     } else {
         if (pxn && !regime_is_user(env, mmu_idx)) {
             xn = 1;
@@ -665,14 +653,14 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
                 fi->type = ARMFault_AccessFlag;
                 goto do_fault;
             }
-            *prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
+            result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap >> 1);
         } else {
-            *prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
+            result->prot = ap_to_rw_prot(env, mmu_idx, ap, domain_prot);
         }
-        if (*prot && !xn) {
-            *prot |= PAGE_EXEC;
+        if (result->prot && !xn) {
+            result->prot |= PAGE_EXEC;
         }
-        if (!(*prot & (1 << access_type))) {
+        if (!(result->prot & (1 << access_type))) {
             /* Access permission fault. */
             fi->type = ARMFault_Permission;
             goto do_fault;
@@ -683,9 +671,9 @@ static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
          * the CPU doesn't support TZ or this is a non-secure translation
          * regime, because the attribute will already be non-secure.
          */
-        attrs->secure = false;
+        result->attrs.secure = false;
     }
-    *phys_ptr = phys_addr;
+    result->phys = phys_addr;
     return false;
 do_fault:
     fi->domain = domain;
@@ -972,19 +960,13 @@ static bool check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, int level,
  *             table walk), must be true if this is stage 2 of a stage 1+2
  *             walk for an EL0 access. If @mmu_idx is anything else,
  *             @s1_is_el0 is ignored.
- * @phys_ptr: set to the physical address corresponding to the virtual address
- * @attrs: set to the memory transaction attributes to use
- * @prot: set to the permissions for the page containing phys_ptr
- * @page_size_ptr: set to the size of the page containing phys_ptr
+ * @result: set on translation success,
  * @fi: set to fault info if the translation fails
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
  */
 static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
                                MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool s1_is_el0, hwaddr *phys_ptr,
-                               MemTxAttrs *txattrs, int *prot,
-                               target_ulong *page_size_ptr,
-                               ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+                               bool s1_is_el0, GetPhysAddrResult *result,
+                               ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
     /* Read an LPAE long-descriptor translation table. */
@@ -1302,16 +1284,16 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
         ns = mmu_idx == ARMMMUIdx_Stage2;
         xn = extract32(attrs, 11, 2);
-        *prot = get_S2prot(env, ap, xn, s1_is_el0);
+        result->prot = get_S2prot(env, ap, xn, s1_is_el0);
     } else {
         ns = extract32(attrs, 3, 1);
         xn = extract32(attrs, 12, 1);
         pxn = extract32(attrs, 11, 1);
-        *prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
+        result->prot = get_S1prot(env, mmu_idx, aarch64, ap, ns, xn, pxn);
     }
 
     fault_type = ARMFault_Permission;
-    if (!(*prot & (1 << access_type))) {
+    if (!(result->prot & (1 << access_type))) {
         goto do_fault;
     }
 
@@ -1321,23 +1303,23 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
          * the CPU doesn't support TZ or this is a non-secure translation
          * regime, because the attribute will already be non-secure.
          */
-        txattrs->secure = false;
+        result->attrs.secure = false;
     }
     /* When in aarch64 mode, and BTI is enabled, remember GP in the IOTLB. */
     if (aarch64 && guarded && cpu_isar_feature(aa64_bti, cpu)) {
-        arm_tlb_bti_gp(txattrs) = true;
+        arm_tlb_bti_gp(&result->attrs) = true;
     }
 
     if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
-        cacheattrs->is_s2_format = true;
-        cacheattrs->attrs = extract32(attrs, 0, 4);
+        result->cacheattrs.is_s2_format = true;
+        result->cacheattrs.attrs = extract32(attrs, 0, 4);
     } else {
         /* Index into MAIR registers for cache attributes */
         uint8_t attrindx = extract32(attrs, 0, 3);
         uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
         assert(attrindx <= 7);
-        cacheattrs->is_s2_format = false;
-        cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
+        result->cacheattrs.is_s2_format = false;
+        result->cacheattrs.attrs = extract64(mair, attrindx * 8, 8);
     }
 
     /*
@@ -1346,13 +1328,13 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
      * that case comes from TCR_ELx, which we extracted earlier.
      */
     if (param.ds) {
-        cacheattrs->shareability = param.sh;
+        result->cacheattrs.shareability = param.sh;
     } else {
-        cacheattrs->shareability = extract32(attrs, 6, 2);
+        result->cacheattrs.shareability = extract32(attrs, 6, 2);
     }
 
-    *phys_ptr = descaddr;
-    *page_size_ptr = page_size;
+    result->phys = descaddr;
+    result->page_size = page_size;
     return false;
 
 do_fault:
@@ -1367,7 +1349,7 @@ do_fault:
 
 static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, int *prot,
+                                 bool is_secure, GetPhysAddrResult *result,
                                  ARMMMUFaultInfo *fi)
 {
     int n;
@@ -1377,12 +1359,12 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
 
     if (regime_translation_disabled(env, mmu_idx)) {
         /* MPU disabled. */
-        *phys_ptr = address;
-        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        result->phys = address;
+        result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
         return false;
     }
 
-    *phys_ptr = address;
+    result->phys = address;
     for (n = 7; n >= 0; n--) {
         base = env->cp15.c6_region[n];
         if ((base & 1) == 0) {
@@ -1418,16 +1400,16 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
             fi->level = 1;
             return true;
         }
-        *prot = PAGE_READ | PAGE_WRITE;
+        result->prot = PAGE_READ | PAGE_WRITE;
         break;
     case 2:
-        *prot = PAGE_READ;
+        result->prot = PAGE_READ;
         if (!is_user) {
-            *prot |= PAGE_WRITE;
+            result->prot |= PAGE_WRITE;
         }
         break;
     case 3:
-        *prot = PAGE_READ | PAGE_WRITE;
+        result->prot = PAGE_READ | PAGE_WRITE;
         break;
     case 5:
         if (is_user) {
@@ -1435,10 +1417,10 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
             fi->level = 1;
             return true;
         }
-        *prot = PAGE_READ;
+        result->prot = PAGE_READ;
         break;
     case 6:
-        *prot = PAGE_READ;
+        result->prot = PAGE_READ;
         break;
     default:
         /* Bad permission. */
@@ -1446,7 +1428,7 @@ static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
         fi->level = 1;
         return true;
     }
-    *prot |= PAGE_EXEC;
+    result->prot |= PAGE_EXEC;
     return false;
 }
 
@@ -1507,7 +1489,7 @@ static bool m_is_system_region(CPUARMState *env, uint32_t address)
 }
 
 static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
-                                         bool is_user)
+                                         bool is_secure, bool is_user)
 {
     /*
      * Return true if we should use the default memory map as a
@@ -1520,8 +1502,7 @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
     }
 
     if (arm_feature(env, ARM_FEATURE_M)) {
-        return env->v7m.mpu_ctrl[regime_is_secure(env, mmu_idx)]
-            & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
+        return env->v7m.mpu_ctrl[is_secure] & R_V7M_MPU_CTRL_PRIVDEFENA_MASK;
     } else {
         return regime_sctlr(env, mmu_idx) & SCTLR_BR;
     }
@@ -1529,17 +1510,16 @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
 
 static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
                                  hwaddr *phys_ptr, int *prot,
-                                 target_ulong *page_size,
+                                 bool secure, GetPhysAddrResult *result,
                                  ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
     int n;
     bool is_user = regime_is_user(env, mmu_idx);
 
-    *phys_ptr = address;
-    *page_size = TARGET_PAGE_SIZE;
-    *prot = 0;
+    result->phys = address;
+    result->page_size = TARGET_PAGE_SIZE;
+    result->prot = 0;
 
     if (regime_translation_disabled(env, mmu_idx) ||
         m_is_ppb_region(env, address)) {
@@ -1551,7 +1531,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
          * which always does a direct read using address_space_ldl(), rather
          * than going via this function, so we don't need to check that here.
          */
-        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
     } else { /* MPU enabled */
         for (n = (int)cpu->pmsav7_dregion - 1; n >= 0; n--) {
             /* region search */
@@ -1593,7 +1573,7 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                 if (ranges_overlap(base, rmask,
                                    address & TARGET_PAGE_MASK,
                                    TARGET_PAGE_SIZE)) {
-                    *page_size = 1;
+                    result->page_size = 1;
                 }
                 continue;
             }
@@ -1631,18 +1611,18 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                 continue;
             }
             if (rsize < TARGET_PAGE_BITS) {
-                *page_size = 1 << rsize;
+                result->page_size = 1 << rsize;
             }
             break;
         }
 
         if (n == -1) { /* no hits */
-            if (!pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+            if (!pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
                 /* background fault */
                 fi->type = ARMFault_Background;
                 return true;
             }
-            get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+            get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
         } else { /* a MPU hit! */
             uint32_t ap = extract32(env->pmsav7.dracr[n], 8, 3);
             uint32_t xn = extract32(env->pmsav7.dracr[n], 12, 1);
@@ -1659,16 +1639,16 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                 case 5:
                     break; /* no access */
                 case 3:
-                    *prot |= PAGE_WRITE;
+                    result->prot |= PAGE_WRITE;
                     /* fall through */
                 case 2:
                 case 6:
-                    *prot |= PAGE_READ | PAGE_EXEC;
+                    result->prot |= PAGE_READ | PAGE_EXEC;
                     break;
                 case 7:
                     /* for v7M, same as 6; for R profile a reserved value */
                     if (arm_feature(env, ARM_FEATURE_M)) {
-                        *prot |= PAGE_READ | PAGE_EXEC;
+                        result->prot |= PAGE_READ | PAGE_EXEC;
                         break;
                     }
                     /* fall through */
@@ -1684,16 +1664,16 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
                 case 1:
                 case 2:
                 case 3:
-                    *prot |= PAGE_WRITE;
+                    result->prot |= PAGE_WRITE;
                     /* fall through */
                 case 5:
                 case 6:
-                    *prot |= PAGE_READ | PAGE_EXEC;
+                    result->prot |= PAGE_READ | PAGE_EXEC;
                     break;
                 case 7:
                     /* for v7M, same as 6; for R profile a reserved value */
                     if (arm_feature(env, ARM_FEATURE_M)) {
-                        *prot |= PAGE_READ | PAGE_EXEC;
+                        result->prot |= PAGE_READ | PAGE_EXEC;
                         break;
                     }
                     /* fall through */
@@ -1706,20 +1686,19 @@ static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
 
             /* execute never */
             if (xn) {
-                *prot &= ~PAGE_EXEC;
+                result->prot &= ~PAGE_EXEC;
             }
         }
     }
 
     fi->type = ARMFault_Permission;
     fi->level = 1;
-    return !(*prot & (1 << access_type));
+    return !(result->prot & (1 << access_type));
 }
 
 bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                       hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                       int *prot, bool *is_subpage,
+                       bool secure, GetPhysAddrResult *result,
                        ARMMMUFaultInfo *fi, uint32_t *mregion)
 {
     /*
@@ -1728,21 +1707,21 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
      * mregion is (if not NULL) set to the region number which matched,
      * or -1 if no region number is returned (MPU off, address did not
      * hit a region, address hit in multiple regions).
-     * We set is_subpage to true if the region hit doesn't cover the
-     * entire TARGET_PAGE the address is within.
+     * If the region hit doesn't cover the entire TARGET_PAGE the address
+     * is within, then we set the result page_size to 1 to force the
+     * memory system to use a subpage.
      */
     ARMCPU *cpu = env_archcpu(env);
     bool is_user = regime_is_user(env, mmu_idx);
-    uint32_t secure = regime_is_secure(env, mmu_idx);
     int n;
     int matchregion = -1;
     bool hit = false;
     uint32_t addr_page_base = address & TARGET_PAGE_MASK;
     uint32_t addr_page_limit = addr_page_base + (TARGET_PAGE_SIZE - 1);
 
-    *is_subpage = false;
-    *phys_ptr = address;
-    *prot = 0;
+    result->page_size = TARGET_PAGE_SIZE;
+    result->phys = address;
+    result->prot = 0;
     if (mregion) {
         *mregion = -1;
     }
@@ -1759,7 +1738,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
     } else if (m_is_ppb_region(env, address)) {
         hit = true;
     } else {
-        if (pmsav7_use_background_region(cpu, mmu_idx, is_user)) {
+        if (pmsav7_use_background_region(cpu, mmu_idx, secure, is_user)) {
             hit = true;
         }
 
@@ -1792,13 +1771,13 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
                     ranges_overlap(base, limit - base + 1,
                                    addr_page_base,
                                    TARGET_PAGE_SIZE)) {
-                    *is_subpage = true;
+                    result->page_size = 1;
                 }
                 continue;
             }
 
             if (base > addr_page_base || limit < addr_page_limit) {
-                *is_subpage = true;
+                result->page_size = 1;
             }
 
             if (matchregion != -1) {
@@ -1824,7 +1803,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
 
     if (matchregion == -1) {
         /* hit using the background region */
-        get_phys_addr_pmsav7_default(env, mmu_idx, address, prot);
+        get_phys_addr_pmsav7_default(env, mmu_idx, address, &result->prot);
     } else {
         uint32_t ap = extract32(env->pmsav8.rbar[secure][matchregion], 1, 2);
         uint32_t xn = extract32(env->pmsav8.rbar[secure][matchregion], 0, 1);
@@ -1839,9 +1818,9 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
             xn = 1;
         }
 
-        *prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
-        if (*prot && !xn && !(pxn && !is_user)) {
-            *prot |= PAGE_EXEC;
+        result->prot = simple_ap_to_rw_prot(env, mmu_idx, ap);
+        if (result->prot && !xn && !(pxn && !is_user)) {
+            result->prot |= PAGE_EXEC;
         }
         /*
          * We don't need to look the attribute up in the MAIR0/MAIR1
@@ -1854,7 +1833,7 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
 
     fi->type = ARMFault_Permission;
     fi->level = 1;
-    return !(*prot & (1 << access_type));
+    return !(result->prot & (1 << access_type));
 }
 
 static bool v8m_is_sau_exempt(CPUARMState *env,
@@ -1874,8 +1853,8 @@ static bool v8m_is_sau_exempt(CPUARMState *env,
 }
 
 void v8m_security_lookup(CPUARMState *env, uint32_t address,
-                        MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                        V8M_SAttributes *sattrs)
+                         MMUAccessType access_type, ARMMMUIdx mmu_idx,
+                         bool is_secure, V8M_SAttributes *sattrs)
 {
     /*
      * Look up the security attributes for this address. Compare the
@@ -1903,7 +1882,7 @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
     }
 
     if (idau_exempt || v8m_is_sau_exempt(env, address, access_type)) {
-        sattrs->ns = !regime_is_secure(env, mmu_idx);
+        sattrs->ns = !is_secure;
         return;
     }
 
@@ -1984,17 +1963,15 @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
 
 static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                                  MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 hwaddr *phys_ptr, MemTxAttrs *txattrs,
-                                 int *prot, target_ulong *page_size,
+                                 bool secure, GetPhysAddrResult *result,
                                  ARMMMUFaultInfo *fi)
 {
-    uint32_t secure = regime_is_secure(env, mmu_idx);
     V8M_SAttributes sattrs = {};
     bool ret;
-    bool mpu_is_subpage;
 
     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
-        v8m_security_lookup(env, address, access_type, mmu_idx, &sattrs);
+        v8m_security_lookup(env, address, access_type, mmu_idx,
+                            secure, &sattrs);
         if (access_type == MMU_INST_FETCH) {
             /*
              * Instruction fetches always use the MMU bank and the
@@ -2020,9 +1997,9 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                 } else {
                     fi->type = ARMFault_QEMU_SFault;
                 }
-                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
-                *phys_ptr = address;
-                *prot = 0;
+                result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
+                result->phys = address;
+                result->prot = 0;
                 return true;
             }
         } else {
@@ -2032,7 +2009,7 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
              * might downgrade a secure access to nonsecure.
              */
             if (sattrs.ns) {
-                txattrs->secure = false;
+                result->attrs.secure = false;
             } else if (!secure) {
                 /*
                  * NS access to S memory must fault.
@@ -2045,17 +2022,19 @@ static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
                  * for M_FAKE_FSR_SFAULT in arm_v7m_cpu_do_interrupt().
                  */
                 fi->type = ARMFault_QEMU_SFault;
-                *page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
-                *phys_ptr = address;
-                *prot = 0;
+                result->page_size = sattrs.subpage ? 1 : TARGET_PAGE_SIZE;
+                result->phys = address;
+                result->prot = 0;
                 return true;
             }
         }
     }
 
-    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, phys_ptr,
-                            txattrs, prot, &mpu_is_subpage, fi, NULL);
-    *page_size = sattrs.subpage || mpu_is_subpage ? 1 : TARGET_PAGE_SIZE;
+    ret = pmsav8_mpu_lookup(env, address, access_type, mmu_idx, secure,
+                            result, fi, NULL);
+    if (sattrs.subpage) {
+        result->page_size = 1;
+    }
     return ret;
 }
 
@@ -2300,20 +2279,15 @@ static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
  * @address: virtual address to get physical address for
  * @access_type: 0 for read, 1 for write, 2 for execute
  * @mmu_idx: MMU index indicating required translation regime
- * @phys_ptr: set to the physical address corresponding to the virtual address
- * @attrs: set to the memory transaction attributes to use
- * @prot: set to the permissions for the page containing phys_ptr
- * @page_size: set to the size of the page containing phys_ptr
+ * @result: set on translation success.
 * @fi: set to fault info if the translation fails
- * @cacheattrs: (if non-NULL) set to the cacheability/shareability attributes
 */
 bool get_phys_addr(CPUARMState *env, target_ulong address,
                    MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                   hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                   target_ulong *page_size,
-                   ARMMMUFaultInfo *fi, ARMCacheAttrs *cacheattrs)
+                   GetPhysAddrResult *result, ARMMMUFaultInfo *fi)
 {
     ARMMMUIdx s1_mmu_idx = stage_1_mmu_idx(mmu_idx);
+    bool is_secure = regime_is_secure(env, mmu_idx);
 
     if (mmu_idx != s1_mmu_idx) {
         /*
@@ -2322,43 +2296,52 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
          */
         if (arm_feature(env, ARM_FEATURE_EL2)) {
             hwaddr ipa;
-            int s2_prot;
+            int s1_prot;
             int ret;
             bool ipa_secure;
-            ARMCacheAttrs cacheattrs2 = {};
+            ARMCacheAttrs cacheattrs1;
             ARMMMUIdx s2_mmu_idx;
             bool is_el0;
 
-            ret = get_phys_addr(env, address, access_type, s1_mmu_idx, &ipa,
-                                attrs, prot, page_size, fi, cacheattrs);
+            ret = get_phys_addr(env, address, access_type, s1_mmu_idx,
+                                result, fi);
 
             /* If S1 fails or S2 is disabled, return early. */
             if (ret || regime_translation_disabled(env, ARMMMUIdx_Stage2)) {
-                *phys_ptr = ipa;
                 return ret;
             }
 
-            ipa_secure = attrs->secure;
+            ipa = result->phys;
+            ipa_secure = result->attrs.secure;
             if (arm_is_secure_below_el3(env)) {
                 if (ipa_secure) {
-                    attrs->secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
+                    result->attrs.secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
                 } else {
-                    attrs->secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
+                    result->attrs.secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
                 }
             } else {
                 assert(!ipa_secure);
             }
 
-            s2_mmu_idx = attrs->secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+            s2_mmu_idx = (result->attrs.secure
+                          ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2);
             is_el0 = mmu_idx == ARMMMUIdx_E10_0 || mmu_idx == ARMMMUIdx_SE10_0;
 
-            /* S1 is done. Now do S2 translation. */
-            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx, is_el0,
-                                     phys_ptr, attrs, &s2_prot,
-                                     page_size, fi, &cacheattrs2);
+            /*
+             * S1 is done, now do S2 translation.
+             * Save the stage1 results so that we may merge
+             * prot and cacheattrs later.
+             */
+            s1_prot = result->prot;
+            cacheattrs1 = result->cacheattrs;
+            memset(result, 0, sizeof(*result));
+
+            ret = get_phys_addr_lpae(env, ipa, access_type, s2_mmu_idx,
+                                     is_el0, result, fi);
             fi->s2addr = ipa;
+
             /* Combine the S1 and S2 perms. */
-            *prot &= s2_prot;
+            result->prot &= s1_prot;
 
             /* If S2 fails, return early. */
             if (ret) {
@@ -2374,20 +2357,21 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                  *  Outer Write-Back Read-Allocate Write-Allocate.
                  * Do not overwrite Tagged within attrs.
                  */
-                if (cacheattrs->attrs != 0xf0) {
-                    cacheattrs->attrs = 0xff;
+                if (cacheattrs1.attrs != 0xf0) {
+                    cacheattrs1.attrs = 0xff;
                 }
-                cacheattrs->shareability = 0;
+                cacheattrs1.shareability = 0;
             }
-            *cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);
+            result->cacheattrs = combine_cacheattrs(env, cacheattrs1,
+                                                    result->cacheattrs);
 
             /* Check if IPA translates to secure or non-secure PA space. */
             if (arm_is_secure_below_el3(env)) {
                 if (ipa_secure) {
-                    attrs->secure =
+                    result->attrs.secure =
                         !(env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW));
                 } else {
-                    attrs->secure =
+                    result->attrs.secure =
                         !((env->cp15.vtcr_el2 & (VTCR_NSA | VTCR_NSW))
                         || (env->cp15.vstcr_el2 & (VSTCR_SA | VSTCR_SW)));
                 }
@@ -2406,8 +2390,8 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
      * cannot upgrade an non-secure translation regime's attributes
      * to secure.
      */
-    attrs->secure = regime_is_secure(env, mmu_idx);
-    attrs->user = regime_is_user(env, mmu_idx);
+    result->attrs.secure = is_secure;
+    result->attrs.user = regime_is_user(env, mmu_idx);
 
     /*
      * Fast Context Switch Extension. This doesn't exist at all in v8.
@@ -2424,20 +2408,20 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
 
     if (arm_feature(env, ARM_FEATURE_PMSA)) {
         bool ret;
-        *page_size = TARGET_PAGE_SIZE;
+        result->page_size = TARGET_PAGE_SIZE;
 
         if (arm_feature(env, ARM_FEATURE_V8)) {
             /* PMSAv8 */
             ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
-                                       phys_ptr, attrs, prot, page_size, fi);
+                                       is_secure, result, fi);
         } else if (arm_feature(env, ARM_FEATURE_V7)) {
             /* PMSAv7 */
             ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
-                                       phys_ptr, prot, page_size, fi);
+                                       is_secure, result, fi);
         } else {
             /* Pre-v7 MPU */
             ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
-                                       phys_ptr, prot, fi);
+                                       is_secure, result, fi);
         }
         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                       " mmu_idx %u -> %s (prot %c%c%c)\n",
@@ -2445,9 +2429,9 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                       (access_type == MMU_DATA_STORE ? "writing" : "execute"),
                       (uint32_t)address, mmu_idx,
                       ret ? "Miss" : "Hit",
-                      *prot & PAGE_READ ? 'r' : '-',
-                      *prot & PAGE_WRITE ? 'w' : '-',
-                      *prot & PAGE_EXEC ? 'x' : '-');
+                      result->prot & PAGE_READ ? 'r' : '-',
+                      result->prot & PAGE_WRITE ? 'w' : '-',
+                      result->prot & PAGE_EXEC ? 'x' : '-');
 
         return ret;
     }
@@ -2492,14 +2476,14 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
                 address = extract64(address, 0, 52);
             }
         }
-        *phys_ptr = address;
-        *prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
-        *page_size = TARGET_PAGE_SIZE;
+        result->phys = address;
+        result->prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
+        result->page_size = TARGET_PAGE_SIZE;
 
         /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
         hcr = arm_hcr_el2_eff(env);
-        cacheattrs->shareability = 0;
-        cacheattrs->is_s2_format = false;
+        result->cacheattrs.shareability = 0;
+        result->cacheattrs.is_s2_format = false;
         if (hcr & HCR_DC) {
             if (hcr & HCR_DCT) {
                 memattr = 0xf0; /* Tagged, Normal, WB, RWA */
@@ -2512,24 +2496,23 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
             } else {
                 memattr = 0x44; /* Normal, NC, No */
             }
-            cacheattrs->shareability = 2; /* outer sharable */
+            result->cacheattrs.shareability = 2; /* outer sharable */
         } else {
             memattr = 0x00; /* Device, nGnRnE */
         }
-        cacheattrs->attrs = memattr;
+        result->cacheattrs.attrs = memattr;
         return 0;
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
         return get_phys_addr_lpae(env, address, access_type, mmu_idx, false,
-                                  phys_ptr, attrs, prot, page_size,
-                                  fi, cacheattrs);
+                                  result, fi);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, address, access_type, mmu_idx,
-                                phys_ptr, attrs, prot, page_size, fi);
+                                is_secure, result, fi);
     } else {
         return get_phys_addr_v5(env, address, access_type, mmu_idx,
-                                phys_ptr, prot, page_size, fi);
+                                is_secure, result, fi);
     }
 }
 
@@ -2538,21 +2521,16 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
-    hwaddr phys_addr;
-    target_ulong page_size;
-    int prot;
-    bool ret;
+    GetPhysAddrResult res = {};
     ARMMMUFaultInfo fi = {};
     ARMMMUIdx mmu_idx = arm_mmu_idx(env);
-    ARMCacheAttrs cacheattrs = {};
-
-    *attrs = (MemTxAttrs) {};
+    bool ret;
 
-    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &phys_addr,
-                        attrs, &prot, &page_size, &fi, &cacheattrs);
+    ret = get_phys_addr(env, addr, MMU_DATA_LOAD, mmu_idx, &res, &fi);
+    *attrs = res.attrs;
 
     if (ret) {
         return -1;
     }
-    return phys_addr;
+    return res.phys;
 }
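
Note: the GetPhysAddrResult type that these hunks thread through the translation
functions is not itself defined in this patch. A minimal sketch, inferred only
from the field accesses visible above (result->phys, result->page_size,
result->prot, result->attrs, result->cacheattrs) rather than copied from
target/arm/internals.h, would look like the following; the field order and the
comments here are assumptions, and the types (hwaddr, target_ulong, MemTxAttrs,
ARMCacheAttrs) are QEMU's existing ones:

    /*
     * Hypothetical sketch of the result type used by the hunks above,
     * reconstructed from how its fields are written in this patch.
     */
    typedef struct GetPhysAddrResult {
        hwaddr phys;              /* output physical address */
        target_ulong page_size;   /* size of the page containing phys */
        int prot;                 /* PAGE_READ / PAGE_WRITE / PAGE_EXEC bits */
        MemTxAttrs attrs;         /* memory transaction attributes */
        ARMCacheAttrs cacheattrs; /* cacheability/shareability attributes */
    } GetPhysAddrResult;

Zero-initialising an instance, as the callers above do with
"GetPhysAddrResult s2 = {};", gives each walk a clean starting state and lets
the stage-2 path reuse the same struct after the memset().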