@@ -30,6 +30,7 @@
 #include "qemu/guest-random.h"
 #include "qapi/error.h"
+
 
 /* CSR function table public API */
 void riscv_get_csr_ops(int csrno, riscv_csr_operations *ops)
 {
@@ -121,6 +122,10 @@ static RISCVException ctr(CPURISCVState *env, int csrno)
     if ((csrno >= CSR_CYCLE && csrno <= CSR_INSTRET) ||
         (csrno >= CSR_CYCLEH && csrno <= CSR_INSTRETH)) {
+        if (!riscv_cpu_cfg(env)->ext_zicntr) {
+            return RISCV_EXCP_ILLEGAL_INST;
+        }
+
         goto skip_ext_pmu_check;
     }
@@ -183,7 +188,8 @@ static RISCVException zcmt(CPURISCVState *env, int csrno)
 
 #if !defined(CONFIG_USER_ONLY)
 static RISCVException mctr(CPURISCVState *env, int csrno)
 {
-    int pmu_num = riscv_cpu_cfg(env)->pmu_num;
+    RISCVCPU *cpu = env_archcpu(env);
+    uint32_t pmu_avail_ctrs = cpu->pmu_avail_ctrs;
     int ctr_index;
     int base_csrno = CSR_MHPMCOUNTER3;
 
@@ -192,7 +198,7 @@ static RISCVException mctr(CPURISCVState *env, int csrno)
         base_csrno += 0x80;
     }
     ctr_index = csrno - base_csrno;
-    if (!pmu_num || ctr_index >= pmu_num) {
+    if ((BIT(ctr_index) & pmu_avail_ctrs >> 3) == 0) {
        /* The PMU is not enabled or counter is out of range */
         return RISCV_EXCP_ILLEGAL_INST;
     }
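With this change the predicate no longer compares against a counter count: pmu_avail_ctrs is treated as a bitmask in which bits 3:31 mark which of mhpmcounter3..31 are implemented. A minimal standalone sketch of the same test, using a hypothetical mask value instead of QEMU's BIT() macro and CPU state:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical mask: only mhpmcounter3 and mhpmcounter4 implemented. */
    #define PMU_AVAIL_CTRS ((1u << 3) | (1u << 4))

    static bool mhpmcounter_exists(int ctr_index)
    {
        /* ctr_index 0 corresponds to mhpmcounter3, hence the shift by 3. */
        return ((1u << ctr_index) & (PMU_AVAIL_CTRS >> 3)) != 0;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               mhpmcounter_exists(0),   /* mhpmcounter3: 1 */
               mhpmcounter_exists(1),   /* mhpmcounter4: 1 */
               mhpmcounter_exists(2));  /* mhpmcounter5: 0 */
        return 0;
    }

This also allows sparse counter layouts, which a plain `ctr_index >= pmu_num` comparison could not express.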
@@ -523,9 +529,12 @@ static RISCVException pmp(CPURISCVState *env, int csrno)
     return RISCV_EXCP_ILLEGAL_INST;
 }
 
-static RISCVException epmp(CPURISCVState *env, int csrno)
+static RISCVException have_mseccfg(CPURISCVState *env, int csrno)
 {
-    if (riscv_cpu_cfg(env)->epmp) {
+    if (riscv_cpu_cfg(env)->ext_smepmp) {
+        return RISCV_EXCP_NONE;
+    }
+    if (riscv_cpu_cfg(env)->ext_zkr) {
         return RISCV_EXCP_NONE;
     }
@@ -1117,21 +1126,16 @@ static RISCVException write_stimecmph(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
-/* Machine constants */
-
-#define M_MODE_INTERRUPTS  ((uint64_t)(MIP_MSIP | MIP_MTIP | MIP_MEIP))
-#define S_MODE_INTERRUPTS  ((uint64_t)(MIP_SSIP | MIP_STIP | MIP_SEIP | \
-                                       MIP_LCOFIP))
-#define VS_MODE_INTERRUPTS ((uint64_t)(MIP_VSSIP | MIP_VSTIP | MIP_VSEIP))
-#define HS_MODE_INTERRUPTS ((uint64_t)(MIP_SGEIP | VS_MODE_INTERRUPTS))
-
 #define VSTOPI_NUM_SRCS 5
 
-static const uint64_t delegable_ints = S_MODE_INTERRUPTS |
-                                       VS_MODE_INTERRUPTS;
-static const uint64_t vs_delegable_ints = VS_MODE_INTERRUPTS;
+#define LOCAL_INTERRUPTS (~0x1FFF)
+
+static const uint64_t delegable_ints =
+    S_MODE_INTERRUPTS | VS_MODE_INTERRUPTS | MIP_LCOFIP;
+static const uint64_t vs_delegable_ints =
+    (VS_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & ~MIP_LCOFIP;
 static const uint64_t all_ints = M_MODE_INTERRUPTS | S_MODE_INTERRUPTS |
-                                 HS_MODE_INTERRUPTS;
+                                 HS_MODE_INTERRUPTS | LOCAL_INTERRUPTS;
 #define DELEGABLE_EXCPS ((1ULL << (RISCV_EXCP_INST_ADDR_MIS)) | \
                          (1ULL << (RISCV_EXCP_INST_ACCESS_FAULT)) | \
                          (1ULL << (RISCV_EXCP_ILLEGAL_INST)) | \
@@ -1162,12 +1166,32 @@ static const target_ulong vs_delegable_excps = DELEGABLE_EXCPS &
 static const target_ulong sstatus_v1_10_mask = SSTATUS_SIE | SSTATUS_SPIE |
     SSTATUS_UIE | SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS |
     SSTATUS_SUM | SSTATUS_MXR | SSTATUS_VS;
-static const target_ulong sip_writable_mask = SIP_SSIP | MIP_USIP | MIP_UEIP |
-                                              SIP_LCOFIP;
+
+/*
+ * The spec allows bits 13:63 to be either read-only or writable.
+ * So far LCOFIP is the only interrupt in that region, and it is
+ * writable.
+ *
+ * The spec also allows virtual interrupts to be injected in this
+ * region even without any hardware interrupt backing that interrupt
+ * number.
+ *
+ * For now, the interrupts in the 13:63 region are all kept writable:
+ * 13 is LCOFIP and 14:63 are virtual-only. Change this in the future
+ * if interrupts that are not writable are introduced.
+ */
+
+/* Bit STIP can be an alias of mip.STIP, which is why it is writable in mvip. */
+static const target_ulong mvip_writable_mask = MIP_SSIP | MIP_STIP | MIP_SEIP |
+                                               LOCAL_INTERRUPTS;
+static const target_ulong mvien_writable_mask = MIP_SSIP | MIP_SEIP |
+                                                LOCAL_INTERRUPTS;
+
+static const target_ulong sip_writable_mask = SIP_SSIP | LOCAL_INTERRUPTS;
 static const target_ulong hip_writable_mask = MIP_VSSIP;
 static const target_ulong hvip_writable_mask = MIP_VSSIP | MIP_VSTIP |
-                                               MIP_VSEIP;
-static const target_ulong vsip_writable_mask = MIP_VSSIP;
+                                               MIP_VSEIP | LOCAL_INTERRUPTS;
+static const target_ulong hvien_writable_mask = LOCAL_INTERRUPTS;
+
+static const target_ulong vsip_writable_mask = MIP_VSSIP | LOCAL_INTERRUPTS;
 
 const bool valid_vm_1_10_32[16] = {
     [VM_1_10_MBARE] = true,
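LOCAL_INTERRUPTS is defined as ~0x1FFF, i.e. everything above bits 0:12. The constant is a negative int, so when it is widened into the uint64_t mask arithmetic it sign-extends and selects exactly bits 13:63. A small standalone check of that property (not QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOCAL_INTERRUPTS (~0x1FFF)

    int main(void)
    {
        /* ~0x1FFF is a negative int; conversion to uint64_t sign-extends. */
        uint64_t mask = LOCAL_INTERRUPTS;

        printf("mask = 0x%016" PRIx64 "\n", mask);            /* 0xffffffffffffe000 */
        printf("bit 12: %d\n", (int)((mask >> 12) & 1));      /* 0 */
        printf("bit 13: %d\n", (int)((mask >> 13) & 1));      /* 1 */
        printf("bit 63: %d\n", (int)((mask >> 63) & 1));      /* 1 */
        return 0;
    }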
@@ -1525,7 +1549,7 @@ static RISCVException rmw_mie64(CPURISCVState *env, int csrno,
     env->mie = (env->mie & ~mask) | (new_val & mask);
 
     if (!riscv_has_ext(env, RVH)) {
-        env->mie &= ~((uint64_t)MIP_SGEIP);
+        env->mie &= ~((uint64_t)HS_MODE_INTERRUPTS);
     }
 
     return RISCV_EXCP_NONE;
@@ -1562,6 +1586,52 @@ static RISCVException rmw_mieh(CPURISCVState *env, int csrno,
     return ret;
 }
 
+static RISCVException rmw_mvien64(CPURISCVState *env, int csrno,
+                                  uint64_t *ret_val,
+                                  uint64_t new_val, uint64_t wr_mask)
+{
+    uint64_t mask = wr_mask & mvien_writable_mask;
+
+    if (ret_val) {
+        *ret_val = env->mvien;
+    }
+
+    env->mvien = (env->mvien & ~mask) | (new_val & mask);
+
+    return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_mvien(CPURISCVState *env, int csrno,
+                                target_ulong *ret_val,
+                                target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvien64(env, csrno, &rval, new_val, wr_mask);
+    if (ret_val) {
+        *ret_val = rval;
+    }
+
+    return ret;
+}
+
+static RISCVException rmw_mvienh(CPURISCVState *env, int csrno,
+                                 target_ulong *ret_val,
+                                 target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvien64(env, csrno, &rval,
+                      ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
+    if (ret_val) {
+        *ret_val = rval >> 32;
+    }
+
+    return ret;
+}
+
 static int read_mtopi(CPURISCVState *env, int csrno, target_ulong *val)
 {
     int irq;
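rmw_mvienh follows the usual pattern for RV32 high-half CSRs: the 32-bit access is widened and shifted into the top half of the 64-bit backing state, and the returned value is shifted back down. A standalone sketch of that round-trip, with a plain variable standing in for env->mvien (simplified; not the QEMU code):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t state;  /* stands in for env->mvien */

    /* 64-bit read-modify-write primitive, as in rmw_mvien64(). */
    static uint64_t rmw64(uint64_t new_val, uint64_t wr_mask)
    {
        uint64_t old = state;
        state = (state & ~wr_mask) | (new_val & wr_mask);
        return old;
    }

    /* High-half access: operate on bits 63:32 only, as in rmw_mvienh(). */
    static uint32_t rmw_high(uint32_t new_val, uint32_t wr_mask)
    {
        uint64_t old = rmw64(((uint64_t)new_val) << 32,
                             ((uint64_t)wr_mask) << 32);
        return old >> 32;
    }

    int main(void)
    {
        rmw_high(0xABCD0000u, 0xFFFF0000u);          /* write the top half */
        printf("state = 0x%016" PRIx64 "\n", state); /* 0xabcd000000000000 */
        return 0;
    }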
@@ -1703,6 +1773,11 @@ static int rmw_xireg(CPURISCVState *env, int csrno, target_ulong *val,
         priv = PRV_M;
         break;
     case CSR_SIREG:
+        if (env->priv == PRV_S && env->mvien & MIP_SEIP &&
+            env->siselect >= ISELECT_IMSIC_EIDELIVERY &&
+            env->siselect <= ISELECT_IMSIC_EIE63) {
+            goto done;
+        }
         iprio = env->siprio;
         isel = env->siselect;
         priv = PRV_S;
@@ -1769,6 +1844,9 @@ static int rmw_xtopei(CPURISCVState *env, int csrno, target_ulong *val,
         priv = PRV_M;
         break;
     case CSR_STOPEI:
+        if (env->mvien & MIP_SEIP && env->priv == PRV_S) {
+            goto done;
+        }
         priv = PRV_S;
         break;
     case CSR_VSTOPEI:
@@ -2360,6 +2438,143 @@ static RISCVException rmw_miph(CPURISCVState *env, int csrno,
     return ret;
 }
 
+/*
+ * This function handles two use-cases:
+ * 1- To access the mvip CSR as-is for M-mode.
+ * 2- To access sip as a combination of mip and mvip for S-mode.
+ *
+ * Both report bits 1, 5, 9 and 13:63, with the exception of STIP
+ * being read-only zero in mvip when the sstc extension is present.
+ * Also, sip needs to be read-only zero when both mideleg[i] and
+ * mvien[i] are zero, whereas mvip needs to be an alias of mip.
+ */
+static RISCVException rmw_mvip64(CPURISCVState *env, int csrno,
+                                 uint64_t *ret_val,
+                                 uint64_t new_val, uint64_t wr_mask)
+{
+    RISCVCPU *cpu = env_archcpu(env);
+    target_ulong ret_mip = 0;
+    RISCVException ret;
+    uint64_t old_mvip;
+
+    /*
+     * mideleg[i]  mvien[i]
+     *   0           0      No delegation. mvip[i] is alias of mip[i].
+     *   0           1      mvip[i] becomes source of interrupt, mip bypassed.
+     *   1           X      mip[i] is source of interrupt and mvip[i] aliases
+     *                      mip[i].
+     *
+     * So the alias condition holds for bits:
+     *   ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (mideleg | ~mvien)) |
+     *   (!sstc & MIP_STIP)
+     *
+     * The non-alias condition holds for bits:
+     *   (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & (~mideleg & mvien)
+     *
+     * alias_mask denotes the bits that come from mip; nalias_mask denotes
+     * the bits that come from mvip.
+     */
+    uint64_t alias_mask = ((S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
+                           (env->mideleg | ~env->mvien)) | MIP_STIP;
+    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
+                           (~env->mideleg & env->mvien);
+    uint64_t wr_mask_mvip;
+    uint64_t wr_mask_mip;
+
+    /*
+     * mideleg[i]  mvien[i]
+     *   0           0      sip[i] read-only zero.
+     *   0           1      sip[i] alias of mvip[i].
+     *   1           X      sip[i] alias of mip[i].
+     *
+     * Both the alias and non-alias masks remain the same for sip except
+     * for bits that are zero in both mideleg and mvien.
+     */
+    if (csrno == CSR_SIP) {
+        /* Remove bits that are zero in both mideleg and mvien. */
+        alias_mask &= (env->mideleg | env->mvien);
+        nalias_mask &= (env->mideleg | env->mvien);
+    }
+
+    /*
+     * If sstc is present, mvip.STIP is not an alias of mip.STIP, so clear
+     * that bit in the value returned from mip.
+     */
+    if (cpu->cfg.ext_sstc && (env->priv == PRV_M) &&
+        get_field(env->menvcfg, MENVCFG_STCE)) {
+        alias_mask &= ~MIP_STIP;
+    }
+
+    wr_mask_mip = wr_mask & alias_mask & mvip_writable_mask;
+    wr_mask_mvip = wr_mask & nalias_mask & mvip_writable_mask;
+
+    /*
+     * For bits set in alias_mask, mvip needs to be an alias of mip, so
+     * forward this to rmw_mip.
+     */
+    ret = rmw_mip(env, CSR_MIP, &ret_mip, new_val, wr_mask_mip);
+    if (ret != RISCV_EXCP_NONE) {
+        return ret;
+    }
+
+    old_mvip = env->mvip;
+
+    /*
+     * Write to mvip. Update only the non-alias bits; the alias bits were
+     * already updated in mip by rmw_mip above.
+     */
+    if (wr_mask_mvip) {
+        env->mvip = (env->mvip & ~wr_mask_mvip) | (new_val & wr_mask_mvip);
+
+        /*
+         * Given that mvip is a separate source from mip, we need to trigger
+         * the interrupt from here separately. Normally this happens via
+         * riscv_cpu_update_mip.
+         */
+        riscv_cpu_interrupt(env);
+    }
+
+    if (ret_val) {
+        ret_mip &= alias_mask;
+        old_mvip &= nalias_mask;
+
+        *ret_val = old_mvip | ret_mip;
+    }
+
+    return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_mvip(CPURISCVState *env, int csrno,
+                               target_ulong *ret_val,
+                               target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvip64(env, csrno, &rval, new_val, wr_mask);
+    if (ret_val) {
+        *ret_val = rval;
+    }
+
+    return ret;
+}
+
+static RISCVException rmw_mviph(CPURISCVState *env, int csrno,
+                                target_ulong *ret_val,
+                                target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_mvip64(env, csrno, &rval,
+                     ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
+    if (ret_val) {
+        *ret_val = rval >> 32;
+    }
+
+    return ret;
+}
+
 /* Supervisor Trap Setup */
 static RISCVException read_sstatus_i128(CPURISCVState *env, int csrno,
                                         Int128 *val)
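The key invariant here is that alias_mask and nalias_mask partition the reported bits: each sip/mvip bit is served either by mip or by mvip, never both. A standalone sketch checking that partition for one hypothetical delegation setup, ignoring the STIP special case (plain uint64_t stand-ins for the CSR fields; not the QEMU code):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define S_MODE_INTERRUPTS ((1ull << 1) | (1ull << 5) | (1ull << 9) | \
                               (1ull << 13))
    #define LOCAL_INTERRUPTS  (~0x1FFFull)

    int main(void)
    {
        /* Hypothetical: SEIP (bit 9) delegated, bit 14 virtualised via mvien. */
        uint64_t mideleg = 1ull << 9;
        uint64_t mvien = 1ull << 14;

        uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
                              (mideleg | ~mvien);
        uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
                               (~mideleg & mvien);

        /* The two masks never overlap, so each bit has a single source. */
        assert((alias_mask & nalias_mask) == 0);

        printf("bit 9 from mip:   %d\n", (int)((alias_mask >> 9) & 1));   /* 1 */
        printf("bit 14 from mvip: %d\n", (int)((nalias_mask >> 14) & 1)); /* 1 */
        return 0;
    }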
@@ -2404,16 +2619,36 @@ static RISCVException rmw_vsie64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
 {
+    uint64_t alias_mask = (LOCAL_INTERRUPTS | VS_MODE_INTERRUPTS) &
+                          env->hideleg;
+    uint64_t nalias_mask = LOCAL_INTERRUPTS & (~env->hideleg & env->hvien);
+    uint64_t rval, rval_vs, vsbits;
+    uint64_t wr_mask_vsie;
+    uint64_t wr_mask_mie;
     RISCVException ret;
-    uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
 
     /* Bring VS-level bits to correct position */
-    new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
-    wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
+    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
+    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
+    new_val |= vsbits << 1;
+
+    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
+    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
+    wr_mask |= vsbits << 1;
+
+    wr_mask_mie = wr_mask & alias_mask;
+    wr_mask_vsie = wr_mask & nalias_mask;
+
+    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask_mie);
+
+    rval_vs = env->vsie & nalias_mask;
+    env->vsie = (env->vsie & ~wr_mask_vsie) | (new_val & wr_mask_vsie);
 
-    ret = rmw_mie64(env, csrno, &rval, new_val, wr_mask & mask);
     if (ret_val) {
-        *ret_val = (rval & mask) >> 1;
+        rval &= alias_mask;
+        vsbits = rval & VS_MODE_INTERRUPTS;
+        rval &= ~VS_MODE_INTERRUPTS;
+        *ret_val = rval | (vsbits >> 1) | rval_vs;
     }
 
     return ret;
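In the guest's vsie/vsip view, the VS-level bits sit one position below their mie/mip encoding (VSSIP is mip bit 2 but vsip bit 1). The old code shifted the whole value, which would clobber the newly reported 13:63 bits; the rewritten code moves only the VS bits and leaves everything else in place. A standalone sketch of the repositioning step with hypothetical input values:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VS_MODE_INTERRUPTS ((1ull << 2) | (1ull << 6) | (1ull << 10))

    /* Move guest-view VS bits (1, 5, 9) up to mie/mip positions (2, 6, 10). */
    static uint64_t reposition_vs_bits(uint64_t val)
    {
        uint64_t vsbits = val & (VS_MODE_INTERRUPTS >> 1);

        val &= ~(VS_MODE_INTERRUPTS >> 1);
        val |= vsbits << 1;
        return val;
    }

    int main(void)
    {
        /* Guest sets SSIP (bit 1) and a local interrupt (bit 14). */
        uint64_t val = (1ull << 1) | (1ull << 14);

        /* Bit 1 moves to bit 2 (VSSIP); bit 14 is left untouched. */
        printf("0x%" PRIx64 "\n", reposition_vs_bits(val)); /* 0x4004 */
        return 0;
    }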
@@ -2454,20 +2689,37 @@ static RISCVException rmw_sie64(CPURISCVState *env, int csrno,
                                 uint64_t *ret_val,
                                 uint64_t new_val, uint64_t wr_mask)
 {
+    uint64_t nalias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) &
+                           (~env->mideleg & env->mvien);
+    uint64_t alias_mask = (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS) & env->mideleg;
+    uint64_t sie_mask = wr_mask & nalias_mask;
     RISCVException ret;
-    uint64_t mask = env->mideleg & S_MODE_INTERRUPTS;
 
+    /*
+     * mideleg[i]  mvien[i]
+     *   0           0      sie[i] read-only zero.
+     *   0           1      sie[i] is a separate writable bit.
+     *   1           X      sie[i] alias of mie[i].
+     *
+     * Both the alias and non-alias masks remain the same for sie except
+     * for bits that are zero in both mideleg and mvien.
+     */
     if (env->virt_enabled) {
         if (env->hvictl & HVICTL_VTI) {
             return RISCV_EXCP_VIRT_INSTRUCTION_FAULT;
         }
         ret = rmw_vsie64(env, CSR_VSIE, ret_val, new_val, wr_mask);
+        if (ret_val) {
+            *ret_val &= alias_mask;
+        }
     } else {
-        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & mask);
-    }
+        ret = rmw_mie64(env, csrno, ret_val, new_val, wr_mask & alias_mask);
+        if (ret_val) {
+            *ret_val &= alias_mask;
+            *ret_val |= env->sie & nalias_mask;
+        }
 
-    if (ret_val) {
-        *ret_val &= mask;
+        env->sie = (env->sie & ~sie_mask) | (new_val & sie_mask);
     }
 
     return ret;
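For bits where mideleg is clear but mvien is set, sie is now backed by its own storage (env->sie) rather than aliasing mie. A small sketch of the per-bit source selection described in the table above (plain variables standing in for CSR state; not the QEMU code):

    #include <stdint.h>
    #include <stdio.h>

    /* Per-bit source selection for sie, following the table above. */
    static int sie_bit(uint64_t mie, uint64_t sie, uint64_t mideleg,
                       uint64_t mvien, int i)
    {
        if (mideleg >> i & 1) {
            return mie >> i & 1;   /* delegated: alias of mie */
        }
        if (mvien >> i & 1) {
            return sie >> i & 1;   /* virtualised: separate writable bit */
        }
        return 0;                  /* neither: read-only zero */
    }

    int main(void)
    {
        uint64_t mie = 1ull << 9, sie = 1ull << 14;
        uint64_t mideleg = 1ull << 9, mvien = 1ull << 14;

        printf("%d %d %d\n",
               sie_bit(mie, sie, mideleg, mvien, 9),   /* 1: from mie */
               sie_bit(mie, sie, mideleg, mvien, 14),  /* 1: from sie */
               sie_bit(mie, sie, mideleg, mvien, 13)); /* 0: read-only zero */
        return 0;
    }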
@@ -2609,21 +2861,36 @@ static RISCVException write_stval(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
+                                 uint64_t *ret_val,
+                                 uint64_t new_val, uint64_t wr_mask);
+
 static RISCVException rmw_vsip64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
 {
     RISCVException ret;
     uint64_t rval, mask = env->hideleg & VS_MODE_INTERRUPTS;
+    uint64_t vsbits;
 
-    /* Bring VS-level bits to correct position */
-    new_val = (new_val & (VS_MODE_INTERRUPTS >> 1)) << 1;
-    wr_mask = (wr_mask & (VS_MODE_INTERRUPTS >> 1)) << 1;
+    /* Add virtualized bits into vsip mask. */
+    mask |= env->hvien & ~env->hideleg;
 
-    ret = rmw_mip64(env, csrno, &rval, new_val,
-                    wr_mask & mask & vsip_writable_mask);
+    /* Bring VS-level bits to correct position */
+    vsbits = new_val & (VS_MODE_INTERRUPTS >> 1);
+    new_val &= ~(VS_MODE_INTERRUPTS >> 1);
+    new_val |= vsbits << 1;
+    vsbits = wr_mask & (VS_MODE_INTERRUPTS >> 1);
+    wr_mask &= ~(VS_MODE_INTERRUPTS >> 1);
+    wr_mask |= vsbits << 1;
+
+    ret = rmw_hvip64(env, csrno, &rval, new_val,
+                     wr_mask & mask & vsip_writable_mask);
     if (ret_val) {
-        *ret_val = (rval & mask) >> 1;
+        rval &= mask;
+        vsbits = rval & VS_MODE_INTERRUPTS;
+        rval &= ~VS_MODE_INTERRUPTS;
+        *ret_val = rval | (vsbits >> 1);
    }
 
     return ret;
@@ -2665,7 +2932,7 @@ static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
                                 uint64_t new_val, uint64_t wr_mask)
 {
     RISCVException ret;
-    uint64_t mask = env->mideleg & sip_writable_mask;
+    uint64_t mask = (env->mideleg | env->mvien) & sip_writable_mask;
 
     if (env->virt_enabled) {
         if (env->hvictl & HVICTL_VTI) {
@@ -2673,11 +2940,12 @@ static RISCVException rmw_sip64(CPURISCVState *env, int csrno,
         }
         ret = rmw_vsip64(env, CSR_VSIP, ret_val, new_val, wr_mask);
     } else {
-        ret = rmw_mip64(env, csrno, ret_val, new_val, wr_mask & mask);
+        ret = rmw_mvip64(env, csrno, ret_val, new_val, wr_mask & mask);
     }
 
     if (ret_val) {
-        *ret_val &= env->mideleg & S_MODE_INTERRUPTS;
+        *ret_val &= (env->mideleg | env->mvien) &
+                    (S_MODE_INTERRUPTS | LOCAL_INTERRUPTS);
     }
 
     return ret;
@@ -2842,6 +3110,7 @@ static int read_vstopi(CPURISCVState *env, int csrno, target_ulong *val)
 
     *val = (iid & TOPI_IID_MASK) << TOPI_IID_SHIFT;
     *val |= iprio;
+
     return RISCV_EXCP_NONE;
 }
@@ -2913,6 +3182,52 @@ static RISCVException write_hedeleg(CPURISCVState *env, int csrno,
     return RISCV_EXCP_NONE;
 }
 
+static RISCVException rmw_hvien64(CPURISCVState *env, int csrno,
+                                  uint64_t *ret_val,
+                                  uint64_t new_val, uint64_t wr_mask)
+{
+    uint64_t mask = wr_mask & hvien_writable_mask;
+
+    if (ret_val) {
+        *ret_val = env->hvien;
+    }
+
+    env->hvien = (env->hvien & ~mask) | (new_val & mask);
+
+    return RISCV_EXCP_NONE;
+}
+
+static RISCVException rmw_hvien(CPURISCVState *env, int csrno,
+                                target_ulong *ret_val,
+                                target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_hvien64(env, csrno, &rval, new_val, wr_mask);
+    if (ret_val) {
+        *ret_val = rval;
+    }
+
+    return ret;
+}
+
+static RISCVException rmw_hvienh(CPURISCVState *env, int csrno,
+                                 target_ulong *ret_val,
+                                 target_ulong new_val, target_ulong wr_mask)
+{
+    uint64_t rval;
+    RISCVException ret;
+
+    ret = rmw_hvien64(env, csrno, &rval,
+                      ((uint64_t)new_val) << 32, ((uint64_t)wr_mask) << 32);
+    if (ret_val) {
+        *ret_val = rval >> 32;
+    }
+
+    return ret;
+}
+
 static RISCVException rmw_hideleg64(CPURISCVState *env, int csrno,
                                     uint64_t *ret_val,
                                     uint64_t new_val, uint64_t wr_mask)
@@ -2958,16 +3273,94 @@ static RISCVException rmw_hidelegh(CPURISCVState *env, int csrno,
     return ret;
 }
 
+/*
+ * This function handles two use-cases:
+ * 1- To access the hvip CSR as-is for HS-mode.
+ * 2- To access vsip as a combination of hvip and mip for VS-mode.
+ *
+ * Both report bits 2, 6, 10 and 13:63.
+ * vsip needs to be read-only zero when both hideleg[i] and
+ * hvien[i] are zero.
+ */
 static RISCVException rmw_hvip64(CPURISCVState *env, int csrno,
                                  uint64_t *ret_val,
                                  uint64_t new_val, uint64_t wr_mask)
 {
     RISCVException ret;
+    uint64_t old_hvip;
+    uint64_t ret_mip;
+
+    /*
+     * For bits 10, 6 and 2, vsip[i] is an alias of hip[i]. These bits are
+     * present in hip, hvip and mip, where mip[i] is an alias of hip[i] and
+     * hvip[i] is OR'ed into hip[i] to inject virtual interrupts from the
+     * hypervisor. These bits are actually maintained in mip, so we read
+     * them from there. This gives us a single source of truth and allows
+     * for an easier implementation.
+     *
+     * For bits 13:63 we have:
+     *
+     * hideleg[i]  hvien[i]
+     *   0           0      No delegation. vsip[i] read-only zero.
+     *   0           1      vsip[i] is alias of hvip[i], sip bypassed.
+     *   1           X      vsip[i] is alias of sip[i], hvip bypassed.
+     *
+     * alias_mask denotes the bits that come from sip (mip here, given we
+     * maintain all bits there). nalias_mask denotes the bits that come
+     * from hvip.
+     */
+    uint64_t alias_mask = (env->hideleg | ~env->hvien) | VS_MODE_INTERRUPTS;
+    uint64_t nalias_mask = (~env->hideleg & env->hvien);
+    uint64_t wr_mask_hvip;
+    uint64_t wr_mask_mip;
+
+    /*
+     * Both the alias and non-alias masks remain the same for vsip except:
+     * 1- For VS* bits if they are zero in hideleg.
+     * 2- For 13:63 bits if they are zero in both hideleg and hvien.
+     */
+    if (csrno == CSR_VSIP) {
+        /* Zero out VS* bits that are not delegated to VS mode. */
+        alias_mask &= (env->hideleg | ~VS_MODE_INTERRUPTS);
+
+        /*
+         * Zero out 13:63 bits that are zero in both hideleg and hvien.
+         * nalias_mask cannot contain any VS* bits, so only the second
+         * condition applies to it.
+         */
+        nalias_mask &= (env->hideleg | env->hvien);
+        alias_mask &= (env->hideleg | env->hvien);
+    }
+
+    wr_mask_hvip = wr_mask & nalias_mask & hvip_writable_mask;
+    wr_mask_mip = wr_mask & alias_mask & hvip_writable_mask;
+
+    /* Aliased bits, bits 10, 6 and 2, need to come from mip. */
+    ret = rmw_mip64(env, csrno, &ret_mip, new_val, wr_mask_mip);
+    if (ret != RISCV_EXCP_NONE) {
+        return ret;
+    }
+
+    old_hvip = env->hvip;
+
+    if (wr_mask_hvip) {
+        env->hvip = (env->hvip & ~wr_mask_hvip) | (new_val & wr_mask_hvip);
+
+        /*
+         * Given that hvip is a separate source from mip, we need to trigger
+         * the interrupt from here separately. Normally this happens via
+         * riscv_cpu_update_mip.
+         */
+        riscv_cpu_interrupt(env);
+    }
 
-    ret = rmw_mip64(env, csrno, ret_val, new_val,
-                    wr_mask & hvip_writable_mask);
     if (ret_val) {
-        *ret_val &= VS_MODE_INTERRUPTS;
+        /* Only take VS* bits from mip. */
+        ret_mip &= alias_mask;
+
+        /* Take in non-delegated 13:63 bits from hvip. */
+        old_hvip &= nalias_mask;
+
+        *ret_val = ret_mip | old_hvip;
     }
 
     return ret;
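The value returned for vsip is thus stitched together from two sources: delegated bits from mip (ret_mip & alias_mask) and hypervisor-injected bits from hvip (old_hvip & nalias_mask). A short standalone sketch of that composition under one hypothetical delegation setup (plain variables, not QEMU state):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define VS_MODE_INTERRUPTS ((1ull << 2) | (1ull << 6) | (1ull << 10))

    int main(void)
    {
        /* Hypothetical: bit 13 delegated via hideleg, bit 14 injected via hvien. */
        uint64_t hideleg = VS_MODE_INTERRUPTS | (1ull << 13);
        uint64_t hvien = 1ull << 14;

        uint64_t alias_mask = (hideleg | ~hvien) | VS_MODE_INTERRUPTS;
        uint64_t nalias_mask = ~hideleg & hvien;

        /* Pending state: VSEIP (bit 10) in mip, bit 14 injected in hvip. */
        uint64_t mip = 1ull << 10;
        uint64_t hvip = 1ull << 14;

        uint64_t vsip = (mip & alias_mask) | (hvip & nalias_mask);

        printf("vsip = 0x%" PRIx64 "\n", vsip); /* 0x4400: bits 10 and 14 */
        return 0;
    }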
@@ -3858,7 +4251,7 @@ static inline RISCVException riscv_csrrw_check(CPURISCVState *env,
     int csr_min_priv = csr_ops[csrno].min_priv_ver;
 
     /* ensure the CSR extension is enabled */
-    if (!riscv_cpu_cfg(env)->ext_icsr) {
+    if (!riscv_cpu_cfg(env)->ext_zicsr) {
         return RISCV_EXCP_ILLEGAL_INST;
     }
|
@@ -4165,14 +4558,14 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
|
|
|
[CSR_MTOPI] = { "mtopi", aia_any, read_mtopi },
|
|
|
|
|
|
/* Virtual Interrupts for Supervisor Level (AIA) */
|
|
|
- [CSR_MVIEN] = { "mvien", aia_any, read_zero, write_ignore },
|
|
|
- [CSR_MVIP] = { "mvip", aia_any, read_zero, write_ignore },
|
|
|
+ [CSR_MVIEN] = { "mvien", aia_any, NULL, NULL, rmw_mvien },
|
|
|
+ [CSR_MVIP] = { "mvip", aia_any, NULL, NULL, rmw_mvip },
|
|
|
|
|
|
/* Machine-Level High-Half CSRs (AIA) */
|
|
|
[CSR_MIDELEGH] = { "midelegh", aia_any32, NULL, NULL, rmw_midelegh },
|
|
|
[CSR_MIEH] = { "mieh", aia_any32, NULL, NULL, rmw_mieh },
|
|
|
- [CSR_MVIENH] = { "mvienh", aia_any32, read_zero, write_ignore },
|
|
|
- [CSR_MVIPH] = { "mviph", aia_any32, read_zero, write_ignore },
|
|
|
+ [CSR_MVIENH] = { "mvienh", aia_any32, NULL, NULL, rmw_mvienh },
|
|
|
+ [CSR_MVIPH] = { "mviph", aia_any32, NULL, NULL, rmw_mviph },
|
|
|
[CSR_MIPH] = { "miph", aia_any32, NULL, NULL, rmw_miph },
|
|
|
|
|
|
/* Execution environment configuration */
|
|
@@ -4346,14 +4739,13 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
                           .min_priv_ver = PRIV_VERSION_1_12_0 },
 
     /* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
-    [CSR_HVIEN]       = { "hvien",       aia_hmode, read_zero, write_ignore },
+    [CSR_HVIEN]       = { "hvien",       aia_hmode, NULL, NULL, rmw_hvien },
     [CSR_HVICTL]      = { "hvictl",      aia_hmode, read_hvictl,
                           write_hvictl },
     [CSR_HVIPRIO1]    = { "hviprio1",    aia_hmode, read_hviprio1,
                           write_hviprio1 },
     [CSR_HVIPRIO2]    = { "hviprio2",    aia_hmode, read_hviprio2,
                           write_hviprio2 },
-
     /*
      * VS-Level Window to Indirectly Accessed Registers (H-extension with AIA)
      */
@@ -4368,8 +4760,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
     /* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
     [CSR_HIDELEGH]    = { "hidelegh",    aia_hmode32, NULL, NULL,
                           rmw_hidelegh },
-    [CSR_HVIENH]      = { "hvienh",      aia_hmode32, read_zero,
-                          write_ignore },
+    [CSR_HVIENH]      = { "hvienh",      aia_hmode32, NULL, NULL, rmw_hvienh },
     [CSR_HVIPH]       = { "hviph",       aia_hmode32, NULL, NULL, rmw_hviph },
     [CSR_HVIPRIO1H]   = { "hviprio1h",   aia_hmode32, read_hviprio1h,
                           write_hviprio1h },
@@ -4379,7 +4770,7 @@ riscv_csr_operations csr_ops[CSR_TABLE_SIZE] = {
     [CSR_VSIPH]       = { "vsiph",       aia_hmode32, NULL, NULL, rmw_vsiph },
 
     /* Physical Memory Protection */
-    [CSR_MSECCFG]     = { "mseccfg",     epmp, read_mseccfg, write_mseccfg,
+    [CSR_MSECCFG]     = { "mseccfg",     have_mseccfg, read_mseccfg, write_mseccfg,
                           .min_priv_ver = PRIV_VERSION_1_11_0 },
     [CSR_PMPCFG0]     = { "pmpcfg0",     pmp, read_pmpcfg, write_pmpcfg },
     [CSR_PMPCFG1]     = { "pmpcfg1",     pmp, read_pmpcfg, write_pmpcfg },
|