@@ -51,13 +51,6 @@ typedef struct S1Translate {
      * value being Stage2 vs Stage2_S distinguishes those.
      */
     ARMSecuritySpace in_space;
-    /*
-     * in_secure: whether the translation regime is a Secure one.
-     * This is always equal to arm_space_is_secure(in_space).
-     * If a Secure ptw is "downgraded" to NonSecure by an NSTable bit,
-     * this field is updated accordingly.
-     */
-    bool in_secure;
     /*
      * in_debug: is this a QEMU debug access (gdbstub, etc)? Debug
      * accesses will not update the guest page table access flags
@@ -70,7 +63,6 @@ typedef struct S1Translate {
      * Stage 2 is indicated by in_mmu_idx set to ARMMMUIdx_Stage2{,_S}.
      */
     bool in_s1_is_el0;
-    bool out_secure;
     bool out_rw;
     bool out_be;
     ARMSecuritySpace out_space;
@@ -165,22 +157,32 @@ static ARMMMUIdx ptw_idx_for_stage_2(CPUARMState *env, ARMMMUIdx stage2idx)
 
     /*
      * We're OK to check the current state of the CPU here because
-     * (1) we always invalidate all TLBs when the SCR_EL3.NS bit changes
+     * (1) we always invalidate all TLBs when the SCR_EL3.NS or SCR_EL3.NSE bit
+     * changes.
      * (2) there's no way to do a lookup that cares about Stage 2 for a
      * different security state to the current one for AArch64, and AArch32
      * never has a secure EL2. (AArch32 ATS12NSO[UP][RW] allow EL3 to do
      * an NS stage 1+2 lookup while the NS bit is 0.)
      */
-    if (!arm_is_secure_below_el3(env) || !arm_el_is_aa64(env, 3)) {
+    if (!arm_el_is_aa64(env, 3)) {
         return ARMMMUIdx_Phys_NS;
     }
 
-    if (stage2idx == ARMMMUIdx_Stage2_S) {
-        s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
-    } else {
-        s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
-    }
-    return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+    switch (arm_security_space_below_el3(env)) {
+    case ARMSS_NonSecure:
+        return ARMMMUIdx_Phys_NS;
+    case ARMSS_Realm:
+        return ARMMMUIdx_Phys_Realm;
+    case ARMSS_Secure:
+        if (stage2idx == ARMMMUIdx_Stage2_S) {
+            s2walk_secure = !(env->cp15.vstcr_el2 & VSTCR_SW);
+        } else {
+            s2walk_secure = !(env->cp15.vtcr_el2 & VTCR_NSW);
+        }
+        return s2walk_secure ? ARMMMUIdx_Phys_S : ARMMMUIdx_Phys_NS;
+    default:
+        g_assert_not_reached();
+    }
 }
 
 static bool regime_translation_big_endian(CPUARMState *env, ARMMMUIdx mmu_idx)
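
Aside (not part of the patch): a minimal standalone sketch of the stage-2 walk-index selection that the rewritten ptw_idx_for_stage_2() performs. The enum, function and parameter names below are local stand-ins for QEMU's ARMSecuritySpace/ARMMMUIdx types; the sw/nsw booleans model VSTCR_EL2.SW and VTCR_EL2.NSW.

/* Illustration only: which physical address space backs a stage 2 walk. */
#include <stdbool.h>
#include <stdio.h>

typedef enum { SS_NONSECURE, SS_SECURE, SS_REALM } SecuritySpace;
typedef enum { PHYS_NS, PHYS_S, PHYS_REALM } PhysIdx;

static PhysIdx stage2_walk_phys_idx(SecuritySpace ss, bool walk_is_stage2_s,
                                    bool sw, bool nsw)
{
    switch (ss) {
    case SS_NONSECURE:
        return PHYS_NS;
    case SS_REALM:
        return PHYS_REALM;
    case SS_SECURE:
        /* SW/NSW let Secure stage 2 walks use the NonSecure PA space. */
        if (walk_is_stage2_s) {
            return sw ? PHYS_NS : PHYS_S;
        } else {
            return nsw ? PHYS_NS : PHYS_S;
        }
    default:
        return PHYS_NS;   /* Root never appears below EL3 */
    }
}

int main(void)
{
    printf("Realm S2 walk -> %s\n",
           stage2_walk_phys_idx(SS_REALM, false, false, false) == PHYS_REALM
           ? "Phys_Realm" : "?");
    printf("Secure S-IPA walk with VSTCR.SW=1 -> %s\n",
           stage2_walk_phys_idx(SS_SECURE, true, true, false) == PHYS_NS
           ? "Phys_NS" : "Phys_S");
    return 0;
}

Realm stage 2 walks always target the Realm physical space, while Secure stage 2 can steer either IPA range into the NonSecure physical space via those two bits, matching the switch added above.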
@@ -206,11 +208,12 @@ static uint64_t regime_ttbr(CPUARMState *env, ARMMMUIdx mmu_idx, int ttbrn)
 
 /* Return true if the specified stage of address translation is disabled */
 static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
-                                        bool is_secure)
+                                        ARMSecuritySpace space)
 {
     uint64_t hcr_el2;
 
     if (arm_feature(env, ARM_FEATURE_M)) {
+        bool is_secure = arm_space_is_secure(space);
         switch (env->v7m.mpu_ctrl[is_secure] &
                 (R_V7M_MPU_CTRL_ENABLE_MASK | R_V7M_MPU_CTRL_HFNMIENA_MASK)) {
         case R_V7M_MPU_CTRL_ENABLE_MASK:
@@ -229,18 +232,19 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
         }
     }
 
-    hcr_el2 = arm_hcr_el2_eff_secstate(env, is_secure);
 
     switch (mmu_idx) {
     case ARMMMUIdx_Stage2:
     case ARMMMUIdx_Stage2_S:
         /* HCR.DC means HCR.VM behaves as 1 */
+        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
         return (hcr_el2 & (HCR_DC | HCR_VM)) == 0;
 
     case ARMMMUIdx_E10_0:
     case ARMMMUIdx_E10_1:
     case ARMMMUIdx_E10_1_PAN:
         /* TGE means that EL0/1 act as if SCTLR_EL1.M is zero */
+        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
         if (hcr_el2 & HCR_TGE) {
             return true;
         }
@@ -250,6 +254,7 @@ static bool regime_translation_disabled(CPUARMState *env, ARMMMUIdx mmu_idx,
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
         /* HCR.DC means SCTLR_EL1.M behaves as 0 */
+        hcr_el2 = arm_hcr_el2_eff_secstate(env, space);
         if (hcr_el2 & HCR_DC) {
             return true;
         }
@@ -514,11 +519,21 @@ static ARMSecuritySpace S2_security_space(ARMSecuritySpace s1_space,
     }
 }
 
+static bool fault_s1ns(ARMSecuritySpace space, ARMMMUIdx s2_mmu_idx)
+{
+    /*
+     * For stage 2 faults in Secure EL2, S1NS indicates
+     * whether the faulting IPA is in the Secure or NonSecure
+     * IPA space. For all other kinds of fault, it is false.
+     */
+    return space == ARMSS_Secure && regime_is_stage2(s2_mmu_idx)
+        && s2_mmu_idx == ARMMMUIdx_Stage2_S;
+}
+
 /* Translate a S1 pagetable walk through S2 if needed. */
 static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
                              hwaddr addr, ARMMMUFaultInfo *fi)
 {
-    bool is_secure = ptw->in_secure;
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     ARMMMUIdx s2_mmu_idx = ptw->in_ptw_idx;
     uint8_t pte_attrs;
@@ -534,7 +549,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         S1Translate s2ptw = {
             .in_mmu_idx = s2_mmu_idx,
             .in_ptw_idx = ptw_idx_for_stage_2(env, s2_mmu_idx),
-            .in_secure = arm_space_is_secure(s2_space),
             .in_space = s2_space,
             .in_debug = true,
         };
@@ -548,7 +562,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         pte_attrs = s2.cacheattrs.attrs;
         ptw->out_host = NULL;
         ptw->out_rw = false;
-        ptw->out_secure = s2.f.attrs.secure;
         ptw->out_space = s2.f.attrs.space;
     } else {
 #ifdef CONFIG_TCG
@@ -567,7 +580,6 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         ptw->out_phys = full->phys_addr | (addr & ~TARGET_PAGE_MASK);
         ptw->out_rw = full->prot & PAGE_WRITE;
         pte_attrs = full->pte_attrs;
-        ptw->out_secure = full->attrs.secure;
         ptw->out_space = full->attrs.space;
 #else
         g_assert_not_reached();
@@ -575,7 +587,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
     }
 
     if (regime_is_stage2(s2_mmu_idx)) {
-        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
 
         if ((hcr & HCR_PTW) && S2_attrs_are_device(hcr, pte_attrs)) {
             /*
@@ -586,7 +598,7 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
             fi->s2addr = addr;
             fi->stage2 = true;
             fi->s1ptw = true;
-            fi->s1ns = !is_secure;
+            fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
             return false;
         }
     }
@@ -600,9 +612,9 @@ static bool S1_ptw_translate(CPUARMState *env, S1Translate *ptw,
         fi->type = ARMFault_GPCFOnWalk;
     }
     fi->s2addr = addr;
-    fi->stage2 = true;
-    fi->s1ptw = true;
-    fi->s1ns = !is_secure;
+    fi->stage2 = regime_is_stage2(s2_mmu_idx);
+    fi->s1ptw = fi->stage2;
+    fi->s1ns = fault_s1ns(ptw->in_space, s2_mmu_idx);
     return false;
 }
 
@@ -625,8 +637,8 @@ static uint32_t arm_ldl_ptw(CPUARMState *env, S1Translate *ptw,
     } else {
         /* Page tables are in MMIO. */
         MemTxAttrs attrs = {
-            .secure = ptw->out_secure,
             .space = ptw->out_space,
+            .secure = arm_space_is_secure(ptw->out_space),
         };
         AddressSpace *as = arm_addressspace(cs, attrs);
         MemTxResult result = MEMTX_OK;
@@ -671,8 +683,8 @@ static uint64_t arm_ldq_ptw(CPUARMState *env, S1Translate *ptw,
     } else {
         /* Page tables are in MMIO. */
         MemTxAttrs attrs = {
-            .secure = ptw->out_secure,
             .space = ptw->out_space,
+            .secure = arm_space_is_secure(ptw->out_space),
         };
         AddressSpace *as = arm_addressspace(cs, attrs);
         MemTxResult result = MEMTX_OK;
@@ -701,7 +713,6 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
 
     if (unlikely(!host)) {
         fi->type = ARMFault_UnsuppAtomicUpdate;
-        fi->s1ptw = true;
         return 0;
     }
 
|
@@ -720,11 +731,17 @@ static uint64_t arm_casq_ptw(CPUARMState *env, uint64_t old_val,
|
|
env->tlb_fi = NULL;
|
|
env->tlb_fi = NULL;
|
|
|
|
|
|
if (unlikely(flags & TLB_INVALID_MASK)) {
|
|
if (unlikely(flags & TLB_INVALID_MASK)) {
|
|
|
|
+ /*
|
|
|
|
+ * We know this must be a stage 2 fault because the granule
|
|
|
|
+ * protection table does not separately track read and write
|
|
|
|
+ * permission, so all GPC faults are caught in S1_ptw_translate():
|
|
|
|
+ * we only get here for "readable but not writeable".
|
|
|
|
+ */
|
|
assert(fi->type != ARMFault_None);
|
|
assert(fi->type != ARMFault_None);
|
|
fi->s2addr = ptw->out_virt;
|
|
fi->s2addr = ptw->out_virt;
|
|
fi->stage2 = true;
|
|
fi->stage2 = true;
|
|
fi->s1ptw = true;
|
|
fi->s1ptw = true;
|
|
- fi->s1ns = !ptw->in_secure;
|
|
|
|
|
|
+ fi->s1ns = fault_s1ns(ptw->in_space, ptw->in_ptw_idx);
|
|
return 0;
|
|
return 0;
|
|
}
|
|
}
|
|
|
|
|
|
@@ -1544,6 +1561,25 @@ static int check_s2_mmu_setup(ARMCPU *cpu, bool is_aa64, uint64_t tcr,
     return INT_MIN;
 }
 
+static bool lpae_block_desc_valid(ARMCPU *cpu, bool ds,
+                                  ARMGranuleSize gran, int level)
+{
+    /*
+     * See pseudocode AArch64.BlockDescSupported(): block descriptors
+     * are not valid at all levels, depending on the page size.
+     */
+    switch (gran) {
+    case Gran4K:
+        return (level == 0 && ds) || level == 1 || level == 2;
+    case Gran16K:
+        return (level == 1 && ds) || level == 2;
+    case Gran64K:
+        return (level == 1 && arm_pamax(cpu) == 52) || level == 2;
+    default:
+        g_assert_not_reached();
+    }
+}
+
 /**
  * get_phys_addr_lpae: perform one stage of page table walk, LPAE format
  *
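
Aside (not part of the patch): a minimal standalone sketch of the validity rule that the new lpae_block_desc_valid() helper encodes. The enum and parameter names are local stand-ins for ARMGranuleSize and arm_pamax(); the FEAT_LPA/FEAT_LPA2 remarks in the comments are background assumptions, not text from the patch.

/* Illustration only: at which levels a block descriptor is architecturally valid. */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum { GRAN_4K, GRAN_16K, GRAN_64K } GranuleSize;

static bool block_desc_valid(GranuleSize gran, bool ds, int pamax, int level)
{
    switch (gran) {
    case GRAN_4K:
        /* Level 0 blocks (512GB) only exist with FEAT_LPA2 (DS=1). */
        return (level == 0 && ds) || level == 1 || level == 2;
    case GRAN_16K:
        /* Level 1 blocks (64GB) only exist with FEAT_LPA2 (DS=1). */
        return (level == 1 && ds) || level == 2;
    case GRAN_64K:
        /* Level 1 blocks (4TB) only exist with a 52-bit PA (FEAT_LPA). */
        return (level == 1 && pamax == 52) || level == 2;
    }
    return false;
}

int main(void)
{
    /* Without DS, a 4K level-0 descriptor with bit 1 clear must now fault. */
    assert(!block_desc_valid(GRAN_4K, false, 48, 0));
    assert(block_desc_valid(GRAN_4K, false, 48, 1));
    assert(block_desc_valid(GRAN_64K, false, 52, 1));
    assert(!block_desc_valid(GRAN_64K, false, 48, 1));
    printf("block descriptor validity checks pass\n");
    return 0;
}

This is the table that replaces the old "level == 3 reserved encoding" check in the hunk that follows: a descriptor with bit 1 clear is now rejected at every level where a block descriptor is not architecturally defined.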
@@ -1766,7 +1802,6 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
         QEMU_BUILD_BUG_ON(ARMMMUIdx_Phys_S + 1 != ARMMMUIdx_Phys_NS);
         QEMU_BUILD_BUG_ON(ARMMMUIdx_Stage2_S + 1 != ARMMMUIdx_Stage2);
         ptw->in_ptw_idx += 1;
-        ptw->in_secure = false;
         ptw->in_space = ARMSS_NonSecure;
     }
 
@@ -1780,8 +1815,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
     new_descriptor = descriptor;
 
  restart_atomic_update:
-    if (!(descriptor & 1) || (!(descriptor & 2) && (level == 3))) {
-        /* Invalid, or the Reserved level 3 encoding */
+    if (!(descriptor & 1) ||
+        (!(descriptor & 2) &&
+         !lpae_block_desc_valid(cpu, param.ds, param.gran, level))) {
+        /* Invalid, or a block descriptor at an invalid level */
         goto do_translation_fault;
     }
 
@@ -1868,11 +1905,10 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
      * Extract attributes from the (modified) descriptor, and apply
      * table descriptors. Stage 2 table descriptors do not include
      * any attribute fields. HPD disables all the table attributes
-     * except NSTable.
+     * except NSTable (which we have already handled).
      */
     attrs = new_descriptor & (MAKE_64BIT_MASK(2, 10) | MAKE_64BIT_MASK(50, 14));
     if (!regime_is_stage2(mmu_idx)) {
-        attrs |= !ptw->in_secure << 5; /* NS */
         if (!param.hpd) {
             attrs |= extract64(tableattrs, 0, 2) << 53; /* XN, PXN */
             /*
@@ -2022,24 +2058,31 @@ static bool get_phys_addr_lpae(CPUARMState *env, S1Translate *ptw,
  do_translation_fault:
     fi->type = ARMFault_Translation;
  do_fault:
-    fi->level = level;
-    /* Tag the error as S2 for failed S1 PTW at S2 or ordinary S2. */
-    fi->stage2 = fi->s1ptw || regime_is_stage2(mmu_idx);
-    fi->s1ns = mmu_idx == ARMMMUIdx_Stage2;
+    if (fi->s1ptw) {
+        /* Retain the existing stage 2 fi->level */
+        assert(fi->stage2);
+    } else {
+        fi->level = level;
+        fi->stage2 = regime_is_stage2(mmu_idx);
+    }
+    fi->s1ns = fault_s1ns(ptw->in_space, mmu_idx);
     return true;
 }
 
-static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
-                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 bool is_secure, GetPhysAddrResult *result,
+static bool get_phys_addr_pmsav5(CPUARMState *env,
+                                 S1Translate *ptw,
+                                 uint32_t address,
+                                 MMUAccessType access_type,
+                                 GetPhysAddrResult *result,
                                  ARMMMUFaultInfo *fi)
 {
     int n;
     uint32_t mask;
     uint32_t base;
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     bool is_user = regime_is_user(env, mmu_idx);
 
-    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
+    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
         /* MPU disabled. */
         result->f.phys_addr = address;
         result->f.prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
@@ -2194,20 +2237,24 @@ static bool pmsav7_use_background_region(ARMCPU *cpu, ARMMMUIdx mmu_idx,
     return regime_sctlr(env, mmu_idx) & SCTLR_BR;
 }
 
-static bool get_phys_addr_pmsav7(CPUARMState *env, uint32_t address,
-                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 bool secure, GetPhysAddrResult *result,
+static bool get_phys_addr_pmsav7(CPUARMState *env,
+                                 S1Translate *ptw,
+                                 uint32_t address,
+                                 MMUAccessType access_type,
+                                 GetPhysAddrResult *result,
                                  ARMMMUFaultInfo *fi)
 {
     ARMCPU *cpu = env_archcpu(env);
     int n;
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     bool is_user = regime_is_user(env, mmu_idx);
+    bool secure = arm_space_is_secure(ptw->in_space);
 
     result->f.phys_addr = address;
     result->f.lg_page_size = TARGET_PAGE_BITS;
     result->f.prot = 0;
 
-    if (regime_translation_disabled(env, mmu_idx, secure) ||
+    if (regime_translation_disabled(env, mmu_idx, ptw->in_space) ||
         m_is_ppb_region(env, address)) {
         /*
          * MPU disabled or M profile PPB access: use default memory map.
@@ -2451,7 +2498,8 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,
      * are done in arm_v7m_load_vector(), which always does a direct
      * read using address_space_ldl(), rather than going via this function.
      */
-    if (regime_translation_disabled(env, mmu_idx, secure)) { /* MPU disabled */
+    if (regime_translation_disabled(env, mmu_idx, arm_secure_to_space(secure))) {
+        /* MPU disabled */
         hit = true;
     } else if (m_is_ppb_region(env, address)) {
         hit = true;
@@ -2720,12 +2768,16 @@ void v8m_security_lookup(CPUARMState *env, uint32_t address,
     }
 }
 
-static bool get_phys_addr_pmsav8(CPUARMState *env, uint32_t address,
-                                 MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                                 bool secure, GetPhysAddrResult *result,
+static bool get_phys_addr_pmsav8(CPUARMState *env,
+                                 S1Translate *ptw,
+                                 uint32_t address,
+                                 MMUAccessType access_type,
+                                 GetPhysAddrResult *result,
                                  ARMMMUFaultInfo *fi)
 {
     V8M_SAttributes sattrs = {};
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
+    bool secure = arm_space_is_secure(ptw->in_space);
     bool ret;
 
     if (arm_feature(env, ARM_FEATURE_M_SECURITY)) {
@@ -3029,12 +3081,14 @@ static ARMCacheAttrs combine_cacheattrs(uint64_t hcr,
  * MMU disabled. S1 addresses within aa64 translation regimes are
  * still checked for bounds -- see AArch64.S1DisabledOutput().
  */
-static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
+static bool get_phys_addr_disabled(CPUARMState *env,
+                                   S1Translate *ptw,
+                                   target_ulong address,
                                    MMUAccessType access_type,
-                                   ARMMMUIdx mmu_idx, bool is_secure,
                                    GetPhysAddrResult *result,
                                    ARMMMUFaultInfo *fi)
 {
+    ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
     uint8_t memattr = 0x00; /* Device nGnRnE */
     uint8_t shareability = 0; /* non-shareable */
     int r_el;
@@ -3080,7 +3134,7 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
 
     /* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
     if (r_el == 1) {
-        uint64_t hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+        uint64_t hcr = arm_hcr_el2_eff_secstate(env, ptw->in_space);
         if (hcr & HCR_DC) {
             if (hcr & HCR_DCT) {
                 memattr = 0xf0; /* Tagged, Normal, WB, RWA */
@@ -3089,11 +3143,13 @@ static bool get_phys_addr_disabled(CPUARMState *env, target_ulong address,
             }
         }
     }
-    if (memattr == 0 && access_type == MMU_INST_FETCH) {
-        if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
-            memattr = 0xee; /* Normal, WT, RA, NT */
-        } else {
-            memattr = 0x44; /* Normal, NC, No */
+    if (memattr == 0) {
+        if (access_type == MMU_INST_FETCH) {
+            if (regime_sctlr(env, mmu_idx) & SCTLR_I) {
+                memattr = 0xee; /* Normal, WT, RA, NT */
+            } else {
+                memattr = 0x44; /* Normal, NC, No */
+            }
         }
         shareability = 2; /* outer shareable */
     }
@@ -3117,7 +3173,6 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
 {
     hwaddr ipa;
     int s1_prot, s1_lgpgsz;
-    bool is_secure = ptw->in_secure;
     ARMSecuritySpace in_space = ptw->in_space;
     bool ret, ipa_secure;
     ARMCacheAttrs cacheattrs1;
@@ -3137,7 +3192,6 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
 
     ptw->in_s1_is_el0 = ptw->in_mmu_idx == ARMMMUIdx_Stage1_E0;
     ptw->in_mmu_idx = ipa_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
-    ptw->in_secure = ipa_secure;
     ptw->in_space = ipa_space;
     ptw->in_ptw_idx = ptw_idx_for_stage_2(env, ptw->in_mmu_idx);
 
@@ -3180,7 +3234,7 @@ static bool get_phys_addr_twostage(CPUARMState *env, S1Translate *ptw,
     }
 
     /* Combine the S1 and S2 cache attributes. */
-    hcr = arm_hcr_el2_eff_secstate(env, is_secure);
+    hcr = arm_hcr_el2_eff_secstate(env, in_space);
     if (hcr & HCR_DC) {
         /*
          * HCR.DC forces the first stage attributes to
@@ -3219,7 +3273,6 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
                                 ARMMMUFaultInfo *fi)
 {
     ARMMMUIdx mmu_idx = ptw->in_mmu_idx;
-    bool is_secure = ptw->in_secure;
     ARMMMUIdx s1_mmu_idx;
 
     /*
@@ -3227,8 +3280,8 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
      * cannot upgrade a NonSecure translation regime's attributes
      * to Secure or Realm.
      */
-    result->f.attrs.secure = is_secure;
     result->f.attrs.space = ptw->in_space;
+    result->f.attrs.secure = arm_space_is_secure(ptw->in_space);
 
     switch (mmu_idx) {
     case ARMMMUIdx_Phys_S:
@@ -3236,14 +3289,18 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
     case ARMMMUIdx_Phys_Root:
     case ARMMMUIdx_Phys_Realm:
         /* Checking Phys early avoids special casing later vs regime_el. */
-        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
-                                      is_secure, result, fi);
+        return get_phys_addr_disabled(env, ptw, address, access_type,
+                                      result, fi);
 
     case ARMMMUIdx_Stage1_E0:
     case ARMMMUIdx_Stage1_E1:
     case ARMMMUIdx_Stage1_E1_PAN:
-        /* First stage lookup uses second stage for ptw. */
-        ptw->in_ptw_idx = is_secure ? ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
+        /*
+         * First stage lookup uses second stage for ptw; only
+         * Secure has both S and NS IPA and starts with Stage2_S.
+         */
+        ptw->in_ptw_idx = (ptw->in_space == ARMSS_Secure) ?
+            ARMMMUIdx_Stage2_S : ARMMMUIdx_Stage2;
         break;
 
     case ARMMMUIdx_Stage2:
@@ -3272,7 +3329,7 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
          */
         ptw->in_mmu_idx = mmu_idx = s1_mmu_idx;
         if (arm_feature(env, ARM_FEATURE_EL2) &&
-            !regime_translation_disabled(env, ARMMMUIdx_Stage2, is_secure)) {
+            !regime_translation_disabled(env, ARMMMUIdx_Stage2, ptw->in_space)) {
             return get_phys_addr_twostage(env, ptw, address, access_type,
                                           result, fi);
         }
@@ -3305,16 +3362,16 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
 
         if (arm_feature(env, ARM_FEATURE_V8)) {
             /* PMSAv8 */
-            ret = get_phys_addr_pmsav8(env, address, access_type, mmu_idx,
-                                       is_secure, result, fi);
+            ret = get_phys_addr_pmsav8(env, ptw, address, access_type,
+                                       result, fi);
         } else if (arm_feature(env, ARM_FEATURE_V7)) {
             /* PMSAv7 */
-            ret = get_phys_addr_pmsav7(env, address, access_type, mmu_idx,
-                                       is_secure, result, fi);
+            ret = get_phys_addr_pmsav7(env, ptw, address, access_type,
+                                       result, fi);
         } else {
             /* Pre-v7 MPU */
-            ret = get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
-                                       is_secure, result, fi);
+            ret = get_phys_addr_pmsav5(env, ptw, address, access_type,
+                                       result, fi);
         }
         qemu_log_mask(CPU_LOG_MMU, "PMSA MPU lookup for %s at 0x%08" PRIx32
                       " mmu_idx %u -> %s (prot %c%c%c)\n",
@@ -3331,9 +3388,9 @@ static bool get_phys_addr_nogpc(CPUARMState *env, S1Translate *ptw,
 
     /* Definitely a real MMU, not an MPU */
 
-    if (regime_translation_disabled(env, mmu_idx, is_secure)) {
-        return get_phys_addr_disabled(env, address, access_type, mmu_idx,
-                                      is_secure, result, fi);
+    if (regime_translation_disabled(env, mmu_idx, ptw->in_space)) {
+        return get_phys_addr_disabled(env, ptw, address, access_type,
+                                      result, fi);
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
@@ -3363,17 +3420,17 @@ static bool get_phys_addr_gpc(CPUARMState *env, S1Translate *ptw,
     return false;
 }
 
-bool get_phys_addr_with_secure(CPUARMState *env, target_ulong address,
-                               MMUAccessType access_type, ARMMMUIdx mmu_idx,
-                               bool is_secure, GetPhysAddrResult *result,
-                               ARMMMUFaultInfo *fi)
+bool get_phys_addr_with_space_nogpc(CPUARMState *env, target_ulong address,
+                                    MMUAccessType access_type,
+                                    ARMMMUIdx mmu_idx, ARMSecuritySpace space,
+                                    GetPhysAddrResult *result,
+                                    ARMMMUFaultInfo *fi)
 {
     S1Translate ptw = {
         .in_mmu_idx = mmu_idx,
-        .in_secure = is_secure,
-        .in_space = arm_secure_to_space(is_secure),
+        .in_space = space,
     };
-    return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
+    return get_phys_addr_nogpc(env, &ptw, address, access_type, result, fi);
 }
 
 bool get_phys_addr(CPUARMState *env, target_ulong address,
@@ -3442,7 +3499,6 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
     }
 
     ptw.in_space = ss;
-    ptw.in_secure = arm_space_is_secure(ss);
     return get_phys_addr_gpc(env, &ptw, address, access_type, result, fi);
 }
 
@@ -3456,7 +3512,6 @@ hwaddr arm_cpu_get_phys_page_attrs_debug(CPUState *cs, vaddr addr,
     S1Translate ptw = {
         .in_mmu_idx = mmu_idx,
         .in_space = ss,
-        .in_secure = arm_space_is_secure(ss),
         .in_debug = true,
     };
     GetPhysAddrResult res = {};
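
Aside (not part of the patch): a minimal standalone sketch of the security-space bookkeeping that lets the series drop the in_secure/out_secure booleans. The boolean view is always derivable from the space, so only the space needs to be tracked; the enum and helpers below are local stand-ins for ARMSecuritySpace, arm_space_is_secure() and arm_secure_to_space().

/* Illustration only: the secure flag is a pure function of the security space. */
#include <assert.h>
#include <stdbool.h>

typedef enum {
    SS_SECURE,     /* stand-in for ARMSS_Secure */
    SS_NONSECURE,  /* stand-in for ARMSS_NonSecure */
    SS_ROOT,       /* stand-in for ARMSS_Root */
    SS_REALM,      /* stand-in for ARMSS_Realm */
} SecuritySpace;

static bool space_is_secure(SecuritySpace space)
{
    /* Secure and Root map to secure=1; NonSecure and Realm to secure=0. */
    return space == SS_SECURE || space == SS_ROOT;
}

static SecuritySpace secure_to_space(bool secure)
{
    return secure ? SS_SECURE : SS_NONSECURE;
}

int main(void)
{
    /* A walk downgraded to NonSecure by an NSTable bit only needs the
     * space updated; the boolean can always be recomputed from it. */
    SecuritySpace in_space = SS_SECURE;
    in_space = SS_NONSECURE;            /* NSTable downgrade */
    assert(!space_is_secure(in_space));
    assert(secure_to_space(true) == SS_SECURE);
    assert(space_is_secure(SS_ROOT));   /* Root is a Secure space */
    assert(!space_is_secure(SS_REALM)); /* Realm is not */
    return 0;
}

This mirrors the invariant stated in the removed S1Translate comment: in_secure was always equal to arm_space_is_secure(in_space), including after an NSTable downgrade, which is why the patch can recompute it on demand instead of carrying the flag.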