|
@@ -165,6 +165,10 @@ static inline int get_a32_user_mem_index(DisasContext *s)
|
|
|
case ARMMMUIdx_MPriv:
|
|
|
case ARMMMUIdx_MNegPri:
|
|
|
return arm_to_core_mmu_idx(ARMMMUIdx_MUser);
|
|
|
+ case ARMMMUIdx_MSUser:
|
|
|
+ case ARMMMUIdx_MSPriv:
|
|
|
+ case ARMMMUIdx_MSNegPri:
|
|
|
+ return arm_to_core_mmu_idx(ARMMMUIdx_MSUser);
|
|
|
case ARMMMUIdx_S2NS:
|
|
|
default:
|
|
|
g_assert_not_reached();
|
|
@@ -960,7 +964,8 @@ static inline void gen_bx_excret(DisasContext *s, TCGv_i32 var)
|
|
|
* s->base.is_jmp that we need to do the rest of the work later.
|
|
|
*/
|
|
|
gen_bx(s, var);
|
|
|
- if (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
+ if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY) ||
|
|
|
+ (s->v7m_handler_mode && arm_dc_feature(s, ARM_FEATURE_M))) {
|
|
|
s->base.is_jmp = DISAS_BX_EXCRET;
|
|
|
}
|
|
|
}
|
|
@@ -969,9 +974,18 @@ static inline void gen_bx_excret_final_code(DisasContext *s)
|
|
|
{
|
|
|
/* Generate the code to finish possible exception return and end the TB */
|
|
|
TCGLabel *excret_label = gen_new_label();
|
|
|
+ uint32_t min_magic;
|
|
|
+
|
|
|
+ if (arm_dc_feature(s, ARM_FEATURE_M_SECURITY)) {
|
|
|
+ /* Covers FNC_RETURN and EXC_RETURN magic */
|
|
|
+ min_magic = FNC_RETURN_MIN_MAGIC;
|
|
|
+ } else {
|
|
|
+ /* EXC_RETURN magic only */
|
|
|
+ min_magic = EXC_RETURN_MIN_MAGIC;
|
|
|
+ }
|
|
|
|
|
|
/* Is the new PC value in the magic range indicating exception return? */
|
|
|
- tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], 0xff000000, excret_label);
|
|
|
+ tcg_gen_brcondi_i32(TCG_COND_GEU, cpu_R[15], min_magic, excret_label);
|
|
|
/* No: end the TB as we would for a DISAS_JMP */
|
|
|
if (is_singlestepping(s)) {
|
|
|
gen_singlestep_exception(s);
|
|
@@ -1013,6 +1027,20 @@ static inline void gen_bxns(DisasContext *s, int rm)
|
|
|
s->base.is_jmp = DISAS_EXIT;
|
|
|
}
|
|
|
|
|
|
+static inline void gen_blxns(DisasContext *s, int rm)
|
|
|
+{
|
|
|
+ TCGv_i32 var = load_reg(s, rm);
|
|
|
+
|
|
|
+ /* We don't need to sync condexec state, for the same reason as bxns.
|
|
|
+ * We do however need to set the PC, because the blxns helper reads it.
|
|
|
+ * The blxns helper may throw an exception.
|
|
|
+ */
|
|
|
+ gen_set_pc_im(s, s->pc);
|
|
|
+ gen_helper_v7m_blxns(cpu_env, var);
|
|
|
+ tcg_temp_free_i32(var);
|
|
|
+ s->base.is_jmp = DISAS_EXIT;
|
|
|
+}
|
|
|
+
|
|
|
/* Variant of store_reg which uses branch&exchange logic when storing
|
|
|
to r15 in ARM architecture v7 and above. The source must be a temporary
|
|
|
and will be marked as dead. */
|
|
@@ -9592,6 +9620,44 @@ static void disas_arm_insn(DisasContext *s, unsigned int insn)
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+static bool thumb_insn_is_16bit(DisasContext *s, uint32_t insn)
|
|
|
+{
|
|
|
+ /* Return true if this is a 16 bit instruction. We must be precise
|
|
|
+ * about this (matching the decode). We assume that s->pc still
|
|
|
+ * points to the first 16 bits of the insn.
|
|
|
+ */
|
|
|
+ if ((insn >> 11) < 0x1d) {
|
|
|
+ /* Definitely a 16-bit instruction */
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
|
|
|
+ * first half of a 32-bit Thumb insn. Thumb-1 cores might
|
|
|
+ * end up actually treating this as two 16-bit insns, though,
|
|
|
+ * if it's half of a bl/blx pair that might span a page boundary.
|
|
|
+ */
|
|
|
+ if (arm_dc_feature(s, ARM_FEATURE_THUMB2)) {
|
|
|
+ /* Thumb2 cores (including all M profile ones) always treat
|
|
|
+ * 32-bit insns as 32-bit.
|
|
|
+ */
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+
|
|
|
+ if ((insn >> 11) == 0x1e && (s->pc < s->next_page_start - 3)) {
|
|
|
+ /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix, and the suffix
|
|
|
+ * is not on the next page; we merge this into a 32-bit
|
|
|
+ * insn.
|
|
|
+ */
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ /* 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF);
|
|
|
+ * 0b1111_1xxx_xxxx_xxxx : BL suffix;
|
|
|
+ * 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix on the end of a page
|
|
|
+ * -- handle as single 16 bit insn
|
|
|
+ */
|
|
|
+ return true;
|
|
|
+}
|
|
|
+
|
|
|
/* Return true if this is a Thumb-2 logical op. */
|
|
|
static int
|
|
|
thumb2_logic_op(int op)
|
|
@@ -9677,9 +9743,9 @@ gen_thumb2_data_op(DisasContext *s, int op, int conds, uint32_t shifter_out,
|
|
|
|
|
|
/* Translate a 32-bit thumb instruction. Returns nonzero if the instruction
|
|
|
is not legal. */
|
|
|
-static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw1)
|
|
|
+static int disas_thumb2_insn(DisasContext *s, uint32_t insn)
|
|
|
{
|
|
|
- uint32_t insn, imm, shift, offset;
|
|
|
+ uint32_t imm, shift, offset;
|
|
|
uint32_t rd, rn, rm, rs;
|
|
|
TCGv_i32 tmp;
|
|
|
TCGv_i32 tmp2;
|
|
@@ -9691,52 +9757,9 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
|
|
|
int conds;
|
|
|
int logic_cc;
|
|
|
|
|
|
- if (!(arm_dc_feature(s, ARM_FEATURE_THUMB2)
|
|
|
- || arm_dc_feature(s, ARM_FEATURE_M))) {
|
|
|
- /* Thumb-1 cores may need to treat bl and blx as a pair of
|
|
|
- 16-bit instructions to get correct prefetch abort behavior. */
|
|
|
- insn = insn_hw1;
|
|
|
- if ((insn & (1 << 12)) == 0) {
|
|
|
- ARCH(5);
|
|
|
- /* Second half of blx. */
|
|
|
- offset = ((insn & 0x7ff) << 1);
|
|
|
- tmp = load_reg(s, 14);
|
|
|
- tcg_gen_addi_i32(tmp, tmp, offset);
|
|
|
- tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
|
|
|
-
|
|
|
- tmp2 = tcg_temp_new_i32();
|
|
|
- tcg_gen_movi_i32(tmp2, s->pc | 1);
|
|
|
- store_reg(s, 14, tmp2);
|
|
|
- gen_bx(s, tmp);
|
|
|
- return 0;
|
|
|
- }
|
|
|
- if (insn & (1 << 11)) {
|
|
|
- /* Second half of bl. */
|
|
|
- offset = ((insn & 0x7ff) << 1) | 1;
|
|
|
- tmp = load_reg(s, 14);
|
|
|
- tcg_gen_addi_i32(tmp, tmp, offset);
|
|
|
-
|
|
|
- tmp2 = tcg_temp_new_i32();
|
|
|
- tcg_gen_movi_i32(tmp2, s->pc | 1);
|
|
|
- store_reg(s, 14, tmp2);
|
|
|
- gen_bx(s, tmp);
|
|
|
- return 0;
|
|
|
- }
|
|
|
- if ((s->pc & ~TARGET_PAGE_MASK) == 0) {
|
|
|
- /* Instruction spans a page boundary. Implement it as two
|
|
|
- 16-bit instructions in case the second half causes an
|
|
|
- prefetch abort. */
|
|
|
- offset = ((int32_t)insn << 21) >> 9;
|
|
|
- tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + offset);
|
|
|
- return 0;
|
|
|
- }
|
|
|
- /* Fall through to 32-bit decode. */
|
|
|
- }
|
|
|
-
|
|
|
- insn = arm_lduw_code(env, s->pc, s->sctlr_b);
|
|
|
- s->pc += 2;
|
|
|
- insn |= (uint32_t)insn_hw1 << 16;
|
|
|
-
|
|
|
+ /* The only 32 bit insn that's allowed for Thumb1 is the combined
|
|
|
+ * BL/BLX prefix and suffix.
|
|
|
+ */
|
|
|
if ((insn & 0xf800e800) != 0xf000e800) {
|
|
|
ARCH(6T2);
|
|
|
}
|
|
@@ -9755,7 +9778,28 @@ static int disas_thumb2_insn(CPUARMState *env, DisasContext *s, uint16_t insn_hw
|
|
|
* - load/store doubleword, load/store exclusive, ldacq/strel,
|
|
|
* table branch.
|
|
|
*/
|
|
|
- if (insn & 0x01200000) {
|
|
|
+ if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_M) &&
|
|
|
+ arm_dc_feature(s, ARM_FEATURE_V8)) {
|
|
|
+ /* 0b1110_1001_0111_1111_1110_1001_0111_1111
|
|
|
+ * - SG (v8M only)
|
|
|
+ * The bulk of the behaviour for this instruction is implemented
|
|
|
+ * in v7m_handle_execute_nsc(), which deals with the insn when
|
|
|
+ * it is executed by a CPU in non-secure state from memory
|
|
|
+ * which is Secure & NonSecure-Callable.
|
|
|
+ * Here we only need to handle the remaining cases:
|
|
|
+ * * in NS memory (including the "security extension not
|
|
|
+ * implemented" case) : NOP
|
|
|
+ * * in S memory but CPU already secure (clear IT bits)
|
|
|
+ * We know that the attribute for the memory this insn is
|
|
|
+ * in must match the current CPU state, because otherwise
|
|
|
+ * get_phys_addr_pmsav8 would have generated an exception.
|
|
|
+ */
|
|
|
+ if (s->v8m_secure) {
|
|
|
+ /* Like the IT insn, we don't need to generate any code */
|
|
|
+ s->condexec_cond = 0;
|
|
|
+ s->condexec_mask = 0;
|
|
|
+ }
|
|
|
+ } else if (insn & 0x01200000) {
|
|
|
/* 0b1110_1000_x11x_xxxx_xxxx_xxxx_xxxx_xxxx
|
|
|
* - load/store dual (post-indexed)
|
|
|
* 0b1111_1001_x10x_xxxx_xxxx_xxxx_xxxx_xxxx
|
|
@@ -11051,27 +11095,15 @@ illegal_op:
|
|
|
return 1;
|
|
|
}
|
|
|
|
|
|
-static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
|
|
|
+static void disas_thumb_insn(DisasContext *s, uint32_t insn)
|
|
|
{
|
|
|
- uint32_t val, insn, op, rm, rn, rd, shift, cond;
|
|
|
+ uint32_t val, op, rm, rn, rd, shift, cond;
|
|
|
int32_t offset;
|
|
|
int i;
|
|
|
TCGv_i32 tmp;
|
|
|
TCGv_i32 tmp2;
|
|
|
TCGv_i32 addr;
|
|
|
|
|
|
- if (s->condexec_mask) {
|
|
|
- cond = s->condexec_cond;
|
|
|
- if (cond != 0x0e) { /* Skip conditional when condition is AL. */
|
|
|
- s->condlabel = gen_new_label();
|
|
|
- arm_gen_test_cc(cond ^ 1, s->condlabel);
|
|
|
- s->condjmp = 1;
|
|
|
- }
|
|
|
- }
|
|
|
-
|
|
|
- insn = arm_lduw_code(env, s->pc, s->sctlr_b);
|
|
|
- s->pc += 2;
|
|
|
-
|
|
|
switch (insn >> 12) {
|
|
|
case 0: case 1:
|
|
|
|
|
@@ -11218,8 +11250,7 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
|
|
|
goto undef;
|
|
|
}
|
|
|
if (link) {
|
|
|
- /* BLXNS: not yet implemented */
|
|
|
- goto undef;
|
|
|
+ gen_blxns(s, rm);
|
|
|
} else {
|
|
|
gen_bxns(s, rm);
|
|
|
}
|
|
@@ -11803,8 +11834,21 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
|
|
|
|
|
|
case 14:
|
|
|
if (insn & (1 << 11)) {
|
|
|
- if (disas_thumb2_insn(env, s, insn))
|
|
|
- goto undef32;
|
|
|
+ /* thumb_insn_is_16bit() ensures we can't get here for
|
|
|
+ * a Thumb2 CPU, so this must be a thumb1 split BL/BLX:
|
|
|
+ * 0b1110_1xxx_xxxx_xxxx : BLX suffix (or UNDEF)
|
|
|
+ */
|
|
|
+ assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
|
|
|
+ ARCH(5);
|
|
|
+ offset = ((insn & 0x7ff) << 1);
|
|
|
+ tmp = load_reg(s, 14);
|
|
|
+ tcg_gen_addi_i32(tmp, tmp, offset);
|
|
|
+ tcg_gen_andi_i32(tmp, tmp, 0xfffffffc);
|
|
|
+
|
|
|
+ tmp2 = tcg_temp_new_i32();
|
|
|
+ tcg_gen_movi_i32(tmp2, s->pc | 1);
|
|
|
+ store_reg(s, 14, tmp2);
|
|
|
+ gen_bx(s, tmp);
|
|
|
break;
|
|
|
}
|
|
|
/* unconditional branch */
|
|
@@ -11815,15 +11859,30 @@ static void disas_thumb_insn(CPUARMState *env, DisasContext *s)
|
|
|
break;
|
|
|
|
|
|
case 15:
|
|
|
- if (disas_thumb2_insn(env, s, insn))
|
|
|
- goto undef32;
|
|
|
+ /* thumb_insn_is_16bit() ensures we can't get here for
|
|
|
+ * a Thumb2 CPU, so this must be a thumb1 split BL/BLX.
|
|
|
+ */
|
|
|
+ assert(!arm_dc_feature(s, ARM_FEATURE_THUMB2));
|
|
|
+
|
|
|
+ if (insn & (1 << 11)) {
|
|
|
+ /* 0b1111_1xxx_xxxx_xxxx : BL suffix */
|
|
|
+ offset = ((insn & 0x7ff) << 1) | 1;
|
|
|
+ tmp = load_reg(s, 14);
|
|
|
+ tcg_gen_addi_i32(tmp, tmp, offset);
|
|
|
+
|
|
|
+ tmp2 = tcg_temp_new_i32();
|
|
|
+ tcg_gen_movi_i32(tmp2, s->pc | 1);
|
|
|
+ store_reg(s, 14, tmp2);
|
|
|
+ gen_bx(s, tmp);
|
|
|
+ } else {
|
|
|
+ /* 0b1111_0xxx_xxxx_xxxx : BL/BLX prefix */
|
|
|
+ uint32_t uoffset = ((int32_t)insn << 21) >> 9;
|
|
|
+
|
|
|
+ tcg_gen_movi_i32(cpu_R[14], s->pc + 2 + uoffset);
|
|
|
+ }
|
|
|
break;
|
|
|
}
|
|
|
return;
|
|
|
-undef32:
|
|
|
- gen_exception_insn(s, 4, EXCP_UDEF, syn_uncategorized(),
|
|
|
- default_exception_el(s));
|
|
|
- return;
|
|
|
illegal_op:
|
|
|
undef:
|
|
|
gen_exception_insn(s, 2, EXCP_UDEF, syn_uncategorized(),
|
|
@@ -11834,29 +11893,14 @@ static bool insn_crosses_page(CPUARMState *env, DisasContext *s)
|
|
|
{
|
|
|
/* Return true if the insn at dc->pc might cross a page boundary.
|
|
|
* (False positives are OK, false negatives are not.)
|
|
|
+ * We know this is a Thumb insn, and our caller ensures we are
|
|
|
+ * only called if dc->pc is less than 4 bytes from the page
|
|
|
+ * boundary, so we cross the page if the first 16 bits indicate
|
|
|
+ * that this is a 32 bit insn.
|
|
|
*/
|
|
|
- uint16_t insn;
|
|
|
-
|
|
|
- if ((s->pc & 3) == 0) {
|
|
|
- /* At a 4-aligned address we can't be crossing a page */
|
|
|
- return false;
|
|
|
- }
|
|
|
-
|
|
|
- /* This must be a Thumb insn */
|
|
|
- insn = arm_lduw_code(env, s->pc, s->sctlr_b);
|
|
|
+ uint16_t insn = arm_lduw_code(env, s->pc, s->sctlr_b);
|
|
|
|
|
|
- if ((insn >> 11) >= 0x1d) {
|
|
|
- /* Top five bits 0b11101 / 0b11110 / 0b11111 : this is the
|
|
|
- * First half of a 32-bit Thumb insn. Thumb-1 cores might
|
|
|
- * end up actually treating this as two 16-bit insns (see the
|
|
|
- * code at the start of disas_thumb2_insn()) but we don't bother
|
|
|
- * to check for that as it is unlikely, and false positives here
|
|
|
- * are harmless.
|
|
|
- */
|
|
|
- return true;
|
|
|
- }
|
|
|
- /* Definitely a 16-bit insn, can't be crossing a page. */
|
|
|
- return false;
|
|
|
+ return !thumb_insn_is_16bit(s, insn);
|
|
|
}
|
|
|
|
|
|
static int arm_tr_init_disas_context(DisasContextBase *dcbase,
|
|
@@ -12089,16 +12133,88 @@ static void arm_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
|
|
|
in init_disas_context by adjusting max_insns. */
|
|
|
}
|
|
|
|
|
|
+static bool thumb_insn_is_unconditional(DisasContext *s, uint32_t insn)
|
|
|
+{
|
|
|
+ /* Return true if this Thumb insn is always unconditional,
|
|
|
+ * even inside an IT block. This is true of only a very few
|
|
|
+ * instructions: BKPT, HLT, and SG.
|
|
|
+ *
|
|
|
+ * A larger class of instructions are UNPREDICTABLE if used
|
|
|
+ * inside an IT block; we do not need to detect those here, because
|
|
|
+ * what we do by default (perform the cc check and update the IT
|
|
|
+ * bits state machine) is a permitted CONSTRAINED UNPREDICTABLE
|
|
|
+ * choice for those situations.
|
|
|
+ *
|
|
|
+ * insn is either a 16-bit or a 32-bit instruction; the two are
|
|
|
+ * distinguishable because for the 16-bit case the top 16 bits
|
|
|
+ * are zeroes, and that isn't a valid 32-bit encoding.
|
|
|
+ */
|
|
|
+ if ((insn & 0xffffff00) == 0xbe00) {
|
|
|
+ /* BKPT */
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ if ((insn & 0xffffffc0) == 0xba80 && arm_dc_feature(s, ARM_FEATURE_V8) &&
|
|
|
+ !arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
+ /* HLT: v8A only. This is unconditional even when it is going to
|
|
|
+ * UNDEF; see the v8A ARM ARM DDI0487B.a H3.3.
|
|
|
+ * For v7 cores this was a plain old undefined encoding and so
|
|
|
+ * honours its cc check. (We might be using the encoding as
|
|
|
+ * a semihosting trap, but we don't change the cc check behaviour
|
|
|
+ * on that account, because a debugger connected to a real v7A
|
|
|
+ * core and emulating semihosting traps by catching the UNDEF
|
|
|
+ * exception would also only see cases where the cc check passed.
|
|
|
+ * No guest code should be trying to do a HLT semihosting trap
|
|
|
+ * in an IT block anyway.
|
|
|
+ */
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (insn == 0xe97fe97f && arm_dc_feature(s, ARM_FEATURE_V8) &&
|
|
|
+ arm_dc_feature(s, ARM_FEATURE_M)) {
|
|
|
+ /* SG: v8M only */
|
|
|
+ return true;
|
|
|
+ }
|
|
|
+
|
|
|
+ return false;
|
|
|
+}
|
|
|
+
|
|
|
static void thumb_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
|
|
|
{
|
|
|
DisasContext *dc = container_of(dcbase, DisasContext, base);
|
|
|
CPUARMState *env = cpu->env_ptr;
|
|
|
+ uint32_t insn;
|
|
|
+ bool is_16bit;
|
|
|
|
|
|
if (arm_pre_translate_insn(dc)) {
|
|
|
return;
|
|
|
}
|
|
|
|
|
|
- disas_thumb_insn(env, dc);
|
|
|
+ insn = arm_lduw_code(env, dc->pc, dc->sctlr_b);
|
|
|
+ is_16bit = thumb_insn_is_16bit(dc, insn);
|
|
|
+ dc->pc += 2;
|
|
|
+ if (!is_16bit) {
|
|
|
+ uint32_t insn2 = arm_lduw_code(env, dc->pc, dc->sctlr_b);
|
|
|
+
|
|
|
+ insn = insn << 16 | insn2;
|
|
|
+ dc->pc += 2;
|
|
|
+ }
|
|
|
+
|
|
|
+ if (dc->condexec_mask && !thumb_insn_is_unconditional(dc, insn)) {
|
|
|
+ uint32_t cond = dc->condexec_cond;
|
|
|
+
|
|
|
+ if (cond != 0x0e) { /* Skip conditional when condition is AL. */
|
|
|
+ dc->condlabel = gen_new_label();
|
|
|
+ arm_gen_test_cc(cond ^ 1, dc->condlabel);
|
|
|
+ dc->condjmp = 1;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ if (is_16bit) {
|
|
|
+ disas_thumb_insn(dc, insn);
|
|
|
+ } else {
|
|
|
+ disas_thumb2_insn(dc, insn);
|
|
|
+ }
|
|
|
|
|
|
/* Advance the Thumb condexec condition. */
|
|
|
if (dc->condexec_mask) {
|