@@ -12,10 +12,10 @@
 #include <zlib.h> /* For crc32 */
 
 #ifndef CONFIG_USER_ONLY
-static inline int get_phys_addr(CPUARMState *env, target_ulong address,
-                                int access_type, ARMMMUIdx mmu_idx,
-                                hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                                target_ulong *page_size);
+static inline bool get_phys_addr(CPUARMState *env, target_ulong address,
+                                 int access_type, ARMMMUIdx mmu_idx,
+                                 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                                 target_ulong *page_size, uint32_t *fsr);
 
 /* Definitions for the PMCCNTR and PMCR registers */
 #define PMCRD 0x8
@@ -1495,19 +1495,20 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
     hwaddr phys_addr;
     target_ulong page_size;
     int prot;
-    int ret;
+    uint32_t fsr;
+    bool ret;
     uint64_t par64;
     MemTxAttrs attrs = {};
 
     ret = get_phys_addr(env, value, access_type, mmu_idx,
-                        &phys_addr, &attrs, &prot, &page_size);
+                        &phys_addr, &attrs, &prot, &page_size, &fsr);
     if (extended_addresses_enabled(env)) {
-        /* ret is a DFSR/IFSR value for the long descriptor
+        /* fsr is a DFSR/IFSR value for the long descriptor
          * translation table format, but with WnR always clear.
          * Convert it to a 64-bit PAR.
          */
         par64 = (1 << 11); /* LPAE bit always set */
-        if (ret == 0) {
+        if (!ret) {
             par64 |= phys_addr & ~0xfffULL;
             if (!attrs.secure) {
                 par64 |= (1 << 9); /* NS */
@@ -1515,18 +1516,18 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
             /* We don't set the ATTR or SH fields in the PAR. */
         } else {
             par64 |= 1; /* F */
-            par64 |= (ret & 0x3f) << 1; /* FS */
+            par64 |= (fsr & 0x3f) << 1; /* FS */
             /* Note that S2WLK and FSTAGE are always zero, because we don't
              * implement virtualization and therefore there can't be a stage 2
              * fault.
              */
         }
     } else {
-        /* ret is a DFSR/IFSR value for the short descriptor
+        /* fsr is a DFSR/IFSR value for the short descriptor
          * translation table format (with WnR always clear).
          * Convert it to a 32-bit PAR.
          */
-        if (ret == 0) {
+        if (!ret) {
             /* We do not set any attribute bits in the PAR */
             if (page_size == (1 << 24)
                 && arm_feature(env, ARM_FEATURE_V7)) {
@@ -1538,8 +1539,8 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
                 par64 |= (1 << 9); /* NS */
             }
         } else {
-            par64 = ((ret & (1 << 10)) >> 5) | ((ret & (1 << 12)) >> 6) |
-                    ((ret & 0xf) << 1) | 1;
+            par64 = ((fsr & (1 << 10)) >> 5) | ((fsr & (1 << 12)) >> 6) |
+                    ((fsr & 0xf) << 1) | 1;
         }
     }
     return par64;
@@ -1846,7 +1847,7 @@ static void vmsa_ttbr_write(CPUARMState *env, const ARMCPRegInfo *ri,
     raw_write(env, ri, value);
 }
 
-static const ARMCPRegInfo vmsa_cp_reginfo[] = {
+static const ARMCPRegInfo vmsa_pmsa_cp_reginfo[] = {
     { .name = "DFSR", .cp = 15, .crn = 5, .crm = 0, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW, .type = ARM_CP_ALIAS,
       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.dfsr_s),
@@ -1856,6 +1857,18 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
       .access = PL1_RW, .resetvalue = 0,
       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.ifsr_s),
                              offsetoflow32(CPUARMState, cp15.ifsr_ns) } },
+    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
+      .access = PL1_RW, .resetvalue = 0,
+      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
+                             offsetof(CPUARMState, cp15.dfar_ns) } },
+    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
+      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
+      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
+      .resetvalue = 0, },
+    REGINFO_SENTINEL
+};
+
+static const ARMCPRegInfo vmsa_cp_reginfo[] = {
     { .name = "ESR_EL1", .state = ARM_CP_STATE_AA64,
       .opc0 = 3, .crn = 5, .crm = 2, .opc1 = 0, .opc2 = 0,
       .access = PL1_RW,
@@ -1880,14 +1893,6 @@ static const ARMCPRegInfo vmsa_cp_reginfo[] = {
       .resetfn = arm_cp_reset_ignore, .raw_writefn = vmsa_ttbcr_raw_write,
       .bank_fieldoffsets = { offsetoflow32(CPUARMState, cp15.tcr_el[3]),
                              offsetoflow32(CPUARMState, cp15.tcr_el[1])} },
-    { .name = "FAR_EL1", .state = ARM_CP_STATE_AA64,
-      .opc0 = 3, .crn = 6, .crm = 0, .opc1 = 0, .opc2 = 0,
-      .access = PL1_RW, .fieldoffset = offsetof(CPUARMState, cp15.far_el[1]),
-      .resetvalue = 0, },
-    { .name = "DFAR", .cp = 15, .opc1 = 0, .crn = 6, .crm = 0, .opc2 = 0,
-      .access = PL1_RW, .resetvalue = 0,
-      .bank_fieldoffsets = { offsetof(CPUARMState, cp15.dfar_s),
-                             offsetof(CPUARMState, cp15.dfar_ns) } },
     REGINFO_SENTINEL
 };
 
@@ -2063,19 +2068,18 @@ static const ARMCPRegInfo strongarm_cp_reginfo[] = {
 
 static uint64_t mpidr_read(CPUARMState *env, const ARMCPRegInfo *ri)
 {
-    CPUState *cs = CPU(arm_env_get_cpu(env));
-    uint32_t mpidr = cs->cpu_index;
-    /* We don't support setting cluster ID ([8..11]) (known as Aff1
-     * in later ARM ARM versions), or any of the higher affinity level fields,
-     * so these bits always RAZ.
-     */
+    ARMCPU *cpu = ARM_CPU(arm_env_get_cpu(env));
+    uint64_t mpidr = cpu->mp_affinity;
+
     if (arm_feature(env, ARM_FEATURE_V7MP)) {
         mpidr |= (1U << 31);
         /* Cores which are uniprocessor (non-coherent)
          * but still implement the MP extensions set
-         * bit 30. (For instance, A9UP.) However we do
-         * not currently model any of those cores.
+         * bit 30. (For instance, Cortex-R5).
          */
+        if (cpu->mp_is_up) {
+            mpidr |= (1u << 30);
+        }
     }
     return mpidr;
 }
@@ -3196,7 +3200,8 @@ void register_cp_regs_for_features(ARMCPU *cpu)
     if (arm_feature(env, ARM_FEATURE_V6K)) {
         define_arm_cp_regs(cpu, v6k_cp_reginfo);
     }
-    if (arm_feature(env, ARM_FEATURE_V7MP)) {
+    if (arm_feature(env, ARM_FEATURE_V7MP) &&
+        !arm_feature(env, ARM_FEATURE_MPU)) {
         define_arm_cp_regs(cpu, v7mp_cp_reginfo);
     }
     if (arm_feature(env, ARM_FEATURE_V7)) {
@@ -3348,6 +3353,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         assert(!arm_feature(env, ARM_FEATURE_V6));
         define_arm_cp_regs(cpu, pmsav5_cp_reginfo);
     } else {
+        define_arm_cp_regs(cpu, vmsa_pmsa_cp_reginfo);
         define_arm_cp_regs(cpu, vmsa_cp_reginfo);
     }
     if (arm_feature(env, ARM_FEATURE_THUMB2EE)) {
@@ -3423,16 +3429,19 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             REGINFO_SENTINEL
         };
         ARMCPRegInfo id_v8_midr_cp_reginfo[] = {
-            /* v8 MIDR -- the wildcard isn't necessary, and nor is the
-             * variable-MIDR TI925 behaviour. Instead we have a single
-             * (strictly speaking IMPDEF) alias of the MIDR, REVIDR.
-             */
             { .name = "MIDR_EL1", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 0,
               .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
+            /* crn = 0 op1 = 0 crm = 0 op2 = 4,7 : AArch32 aliases of MIDR */
+            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
+              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 4,
+              .access = PL1_R, .resetvalue = cpu->midr },
+            { .name = "MIDR", .type = ARM_CP_ALIAS | ARM_CP_CONST,
+              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 7,
+              .access = PL1_R, .resetvalue = cpu->midr },
             { .name = "REVIDR_EL1", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 0, .opc2 = 6,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->midr },
+              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = cpu->revidr },
             REGINFO_SENTINEL
         };
         ARMCPRegInfo id_cp_reginfo[] = {
@@ -3448,11 +3457,14 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             { .name = "TCMTR",
              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 2,
              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
-            { .name = "TLBTR",
-              .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
-              .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0 },
             REGINFO_SENTINEL
         };
+        /* TLBTR is specific to VMSA */
+        ARMCPRegInfo id_tlbtr_reginfo = {
+            .name = "TLBTR",
+            .cp = 15, .crn = 0, .crm = 0, .opc1 = 0, .opc2 = 3,
+            .access = PL1_R, .type = ARM_CP_CONST, .resetvalue = 0,
+        };
         ARMCPRegInfo crn0_wi_reginfo = {
             .name = "CRN0_WI", .cp = 15, .crn = 0, .crm = CP_ANY,
             .opc1 = CP_ANY, .opc2 = CP_ANY, .access = PL1_W,
@@ -3474,6 +3486,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             for (r = id_cp_reginfo; r->type != ARM_CP_SENTINEL; r++) {
                 r->access = PL1_RW;
             }
+            id_tlbtr_reginfo.access = PL1_RW;
         }
         if (arm_feature(env, ARM_FEATURE_V8)) {
             define_arm_cp_regs(cpu, id_v8_midr_cp_reginfo);
@@ -3481,6 +3494,9 @@ void register_cp_regs_for_features(ARMCPU *cpu)
             define_arm_cp_regs(cpu, id_pre_v8_midr_cp_reginfo);
         }
         define_arm_cp_regs(cpu, id_cp_reginfo);
+        if (!arm_feature(env, ARM_FEATURE_MPU)) {
+            define_one_arm_cp_reg(cpu, &id_tlbtr_reginfo);
+        }
     }
 
     if (arm_feature(env, ARM_FEATURE_MPIDR)) {
@@ -5229,9 +5245,10 @@ static uint64_t arm_ldq_ptw(CPUState *cs, hwaddr addr, bool is_secure)
     return address_space_ldq(cs->as, addr, attrs, NULL);
 }
 
-static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
-                            ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
-                            int *prot, target_ulong *page_size)
+static bool get_phys_addr_v5(CPUARMState *env, uint32_t address,
+                             int access_type, ARMMMUIdx mmu_idx,
+                             hwaddr *phys_ptr, int *prot,
+                             target_ulong *page_size, uint32_t *fsr)
 {
     CPUState *cs = CPU(arm_env_get_cpu(env));
     int code;
@@ -5302,20 +5319,25 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
             ap = (desc >> (4 + ((address >> 9) & 6))) & 3;
             *page_size = 0x1000;
             break;
-        case 3: /* 1k page. */
+        case 3: /* 1k page, or ARMv6/XScale "extended small (4k) page" */
             if (type == 1) {
-                if (arm_feature(env, ARM_FEATURE_XSCALE)) {
+                /* ARMv6/XScale extended small page format */
+                if (arm_feature(env, ARM_FEATURE_XSCALE)
+                    || arm_feature(env, ARM_FEATURE_V6)) {
                     phys_addr = (desc & 0xfffff000) | (address & 0xfff);
+                    *page_size = 0x1000;
                 } else {
-                    /* Page translation fault. */
+                    /* UNPREDICTABLE in ARMv5; we choose to take a
+                     * page translation fault.
+                     */
                     code = 7;
                     goto do_fault;
                 }
             } else {
                 phys_addr = (desc & 0xfffffc00) | (address & 0x3ff);
+                *page_size = 0x400;
             }
             ap = (desc >> 4) & 3;
-            *page_size = 0x400;
             break;
         default:
             /* Never happens, but compiler isn't smart enough to tell. */
@@ -5330,15 +5352,16 @@ static int get_phys_addr_v5(CPUARMState *env, uint32_t address, int access_type,
         goto do_fault;
     }
     *phys_ptr = phys_addr;
-    return 0;
+    return false;
 do_fault:
-    return code | (domain << 4);
+    *fsr = code | (domain << 4);
+    return true;
 }
 
-static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
-                            ARMMMUIdx mmu_idx, hwaddr *phys_ptr,
-                            MemTxAttrs *attrs,
-                            int *prot, target_ulong *page_size)
+static bool get_phys_addr_v6(CPUARMState *env, uint32_t address,
+                             int access_type, ARMMMUIdx mmu_idx,
+                             hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                             target_ulong *page_size, uint32_t *fsr)
 {
     CPUState *cs = CPU(arm_env_get_cpu(env));
     int code;
@@ -5392,6 +5415,8 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
         if (desc & (1 << 18)) {
             /* Supersection. */
             phys_addr = (desc & 0xff000000) | (address & 0x00ffffff);
+            phys_addr |= (uint64_t)extract32(desc, 20, 4) << 32;
+            phys_addr |= (uint64_t)extract32(desc, 5, 4) << 36;
             *page_size = 0x1000000;
         } else {
             /* Section. */
@@ -5469,9 +5494,10 @@ static int get_phys_addr_v6(CPUARMState *env, uint32_t address, int access_type,
         attrs->secure = false;
     }
     *phys_ptr = phys_addr;
-    return 0;
+    return false;
 do_fault:
-    return code | (domain << 4);
+    *fsr = code | (domain << 4);
+    return true;
 }
 
 /* Fault type for long-descriptor MMU fault reporting; this corresponds
@@ -5483,10 +5509,10 @@ typedef enum {
     permission_fault = 3,
 } MMUFaultType;
 
-static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
-                              int access_type, ARMMMUIdx mmu_idx,
-                              hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
-                              target_ulong *page_size_ptr)
+static bool get_phys_addr_lpae(CPUARMState *env, target_ulong address,
+                               int access_type, ARMMMUIdx mmu_idx,
+                               hwaddr *phys_ptr, MemTxAttrs *txattrs, int *prot,
+                               target_ulong *page_size_ptr, uint32_t *fsr)
 {
     CPUState *cs = CPU(arm_env_get_cpu(env));
     /* Read an LPAE long-descriptor translation table. */
@@ -5725,16 +5751,17 @@ static int get_phys_addr_lpae(CPUARMState *env, target_ulong address,
     }
     *phys_ptr = descaddr;
     *page_size_ptr = page_size;
-    return 0;
+    return false;
 
 do_fault:
     /* Long-descriptor format IFSR/DFSR value */
-    return (1 << 9) | (fault_type << 2) | level;
+    *fsr = (1 << 9) | (fault_type << 2) | level;
+    return true;
 }
 
-static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
-                             int access_type, ARMMMUIdx mmu_idx,
-                             hwaddr *phys_ptr, int *prot)
+static bool get_phys_addr_pmsav5(CPUARMState *env, uint32_t address,
+                                 int access_type, ARMMMUIdx mmu_idx,
+                                 hwaddr *phys_ptr, int *prot, uint32_t *fsr)
 {
     int n;
     uint32_t mask;
@@ -5756,7 +5783,8 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
         }
     }
     if (n < 0) {
-        return 2;
+        *fsr = 2;
+        return true;
     }
 
     if (access_type == 2) {
@@ -5767,10 +5795,12 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
     mask = (mask >> (n * 4)) & 0xf;
     switch (mask) {
     case 0:
-        return 1;
+        *fsr = 1;
+        return true;
     case 1:
         if (is_user) {
-            return 1;
+            *fsr = 1;
+            return true;
         }
         *prot = PAGE_READ | PAGE_WRITE;
         break;
|
|
|
break;
|
|
|
case 5:
|
|
|
if (is_user) {
|
|
|
- return 1;
|
|
|
+ *fsr = 1;
|
|
|
+ return true;
|
|
|
}
|
|
|
*prot = PAGE_READ;
|
|
|
break;
|
|
@@ -5794,10 +5825,11 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
         break;
     default:
         /* Bad permission. */
-        return 1;
+        *fsr = 1;
+        return true;
     }
     *prot |= PAGE_EXEC;
-    return 0;
+    return false;
 }
 
 /* get_phys_addr - get the physical address for this virtual address
@@ -5806,8 +5838,8 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
  * by doing a translation table walk on MMU based systems or using the
  * MPU state on MPU based systems.
  *
- * Returns 0 if the translation was successful. Otherwise, phys_ptr, attrs,
- * prot and page_size may not be filled in, and the return value provides
+ * Returns false if the translation was successful. Otherwise, phys_ptr, attrs,
+ * prot and page_size may not be filled in, and the populated fsr value provides
  * information on why the translation aborted, in the format of a
  * DFSR/IFSR fault register, with the following caveats:
  *  * we honour the short vs long DFSR format differences.
@@ -5823,11 +5855,12 @@ static int get_phys_addr_mpu(CPUARMState *env, uint32_t address,
  * @attrs: set to the memory transaction attributes to use
 * @prot: set to the permissions for the page containing phys_ptr
 * @page_size: set to the size of the page containing phys_ptr
+ * @fsr: set to the DFSR/IFSR value on failure
 */
-static inline int get_phys_addr(CPUARMState *env, target_ulong address,
-                                int access_type, ARMMMUIdx mmu_idx,
-                                hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
-                                target_ulong *page_size)
+static inline bool get_phys_addr(CPUARMState *env, target_ulong address,
+                                 int access_type, ARMMMUIdx mmu_idx,
+                                 hwaddr *phys_ptr, MemTxAttrs *attrs, int *prot,
+                                 target_ulong *page_size, uint32_t *fsr)
 {
     if (mmu_idx == ARMMMUIdx_S12NSE0 || mmu_idx == ARMMMUIdx_S12NSE1) {
         /* TODO: when we support EL2 we should here call ourselves recursively
@@ -5869,28 +5902,28 @@ static inline int get_phys_addr(CPUARMState *env, target_ulong address,
 
     if (arm_feature(env, ARM_FEATURE_MPU)) {
         *page_size = TARGET_PAGE_SIZE;
-        return get_phys_addr_mpu(env, address, access_type, mmu_idx, phys_ptr,
-                                 prot);
+        return get_phys_addr_pmsav5(env, address, access_type, mmu_idx,
+                                    phys_ptr, prot, fsr);
     }
 
     if (regime_using_lpae_format(env, mmu_idx)) {
         return get_phys_addr_lpae(env, address, access_type, mmu_idx, phys_ptr,
-                                  attrs, prot, page_size);
+                                  attrs, prot, page_size, fsr);
     } else if (regime_sctlr(env, mmu_idx) & SCTLR_XP) {
         return get_phys_addr_v6(env, address, access_type, mmu_idx, phys_ptr,
-                                attrs, prot, page_size);
+                                attrs, prot, page_size, fsr);
     } else {
         return get_phys_addr_v5(env, address, access_type, mmu_idx, phys_ptr,
-                                prot, page_size);
+                                prot, page_size, fsr);
     }
 }
 
 /* Walk the page table and (if the mapping exists) add the page
- * to the TLB. Return 0 on success, or an ARM DFSR/IFSR fault
- * register format value on failure.
+ * to the TLB. Return false on success, or true on failure. Populate
+ * fsr with ARM DFSR/IFSR fault register format value on failure.
 */
-int arm_tlb_fill(CPUState *cs, vaddr address,
-                 int access_type, int mmu_idx)
+bool arm_tlb_fill(CPUState *cs, vaddr address,
+                  int access_type, int mmu_idx, uint32_t *fsr)
 {
     ARMCPU *cpu = ARM_CPU(cs);
     CPUARMState *env = &cpu->env;
@@ -5901,8 +5934,8 @@ int arm_tlb_fill(CPUState *cs, vaddr address,
     MemTxAttrs attrs = {};
 
     ret = get_phys_addr(env, address, access_type, mmu_idx, &phys_addr,
-                        &attrs, &prot, &page_size);
+                        &attrs, &prot, &page_size, fsr);
-    if (ret == 0) {
+    if (!ret) {
         /* Map a single [sub]page. */
         phys_addr &= TARGET_PAGE_MASK;
         address &= TARGET_PAGE_MASK;
@@ -5921,13 +5954,14 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     hwaddr phys_addr;
     target_ulong page_size;
     int prot;
-    int ret;
+    bool ret;
+    uint32_t fsr;
     MemTxAttrs attrs = {};
 
     ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
-                        &attrs, &prot, &page_size);
+                        &attrs, &prot, &page_size, &fsr);
 
-    if (ret != 0) {
+    if (ret) {
         return -1;
     }
 