@@ -32,38 +32,14 @@
 
 #ifdef CONFIG_DEBUG_TCG
 static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
-    "zero",
-    "ra",
-    "sp",
-    "gp",
-    "tp",
-    "t0",
-    "t1",
-    "t2",
-    "s0",
-    "s1",
-    "a0",
-    "a1",
-    "a2",
-    "a3",
-    "a4",
-    "a5",
-    "a6",
-    "a7",
-    "s2",
-    "s3",
-    "s4",
-    "s5",
-    "s6",
-    "s7",
-    "s8",
-    "s9",
-    "s10",
-    "s11",
-    "t3",
-    "t4",
-    "t5",
-    "t6"
+    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
+    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
+    "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
+    "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6",
+    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+    "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15",
+    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
 };
 #endif
 
@@ -100,6 +76,16 @@ static const int tcg_target_reg_alloc_order[] = {
     TCG_REG_A5,
     TCG_REG_A6,
     TCG_REG_A7,
+
+    /* Vector registers and TCG_REG_V0 reserved for mask. */
+    TCG_REG_V1, TCG_REG_V2, TCG_REG_V3, TCG_REG_V4,
+    TCG_REG_V5, TCG_REG_V6, TCG_REG_V7, TCG_REG_V8,
+    TCG_REG_V9, TCG_REG_V10, TCG_REG_V11, TCG_REG_V12,
+    TCG_REG_V13, TCG_REG_V14, TCG_REG_V15, TCG_REG_V16,
+    TCG_REG_V17, TCG_REG_V18, TCG_REG_V19, TCG_REG_V20,
+    TCG_REG_V21, TCG_REG_V22, TCG_REG_V23, TCG_REG_V24,
+    TCG_REG_V25, TCG_REG_V26, TCG_REG_V27, TCG_REG_V28,
+    TCG_REG_V29, TCG_REG_V30, TCG_REG_V31,
 };
 
 static const int tcg_target_call_iarg_regs[] = {
@@ -127,6 +113,9 @@ static TCGReg tcg_target_call_oarg_reg(TCGCallReturnKind kind, int slot)
 #define TCG_CT_CONST_J12 0x1000
 
 #define ALL_GENERAL_REGS MAKE_64BIT_MASK(0, 32)
+#define ALL_VECTOR_REGS MAKE_64BIT_MASK(32, 32)
+#define ALL_DVECTOR_REG_GROUPS 0x5555555500000000
+#define ALL_QVECTOR_REG_GROUPS 0x1111111100000000
 
 #define sextreg sextract64
 
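A quick standalone check of how these masks line up (bit math only, not part of the patch): bit 32 + n of a TCGRegSet stands for vector register vn, and the two group masks keep exactly the registers whose index is a multiple of the group size, i.e. the valid group leaders for LMUL=2 and LMUL=4.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    const uint64_t all  = 0xffffffff00000000ull; /* ALL_VECTOR_REGS */
    const uint64_t dbl  = 0x5555555500000000ull; /* ALL_DVECTOR_REG_GROUPS */
    const uint64_t quad = 0x1111111100000000ull; /* ALL_QVECTOR_REG_GROUPS */

    for (int v = 0; v < 32; v++) {
        uint64_t bit = 1ull << (32 + v);
        assert(all & bit);                       /* every vN is a vector reg */
        assert(!!(dbl & bit)  == (v % 2 == 0));  /* v0, v2, v4, ... */
        assert(!!(quad & bit) == (v % 4 == 0));  /* v0, v4, v8, ... */
    }
    return 0;
}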
@@ -176,6 +165,31 @@ static bool tcg_target_const_match(int64_t val, int ct,
  * RISC-V Base ISA opcodes (IM)
  */
 
+#define V_OPIVV (0x0 << 12)
+#define V_OPFVV (0x1 << 12)
+#define V_OPMVV (0x2 << 12)
+#define V_OPIVI (0x3 << 12)
+#define V_OPIVX (0x4 << 12)
+#define V_OPFVF (0x5 << 12)
+#define V_OPMVX (0x6 << 12)
+#define V_OPCFG (0x7 << 12)
+
+/* 0 <= NF <= 7 */
+#define V_NF(x) ((x) << 29)
+#define V_UNIT_STRIDE (0x0 << 20)
+#define V_UNIT_STRIDE_WHOLE_REG (0x8 << 20)
+
+typedef enum {
+    VLMUL_M1 = 0, /* LMUL=1 */
+    VLMUL_M2,     /* LMUL=2 */
+    VLMUL_M4,     /* LMUL=4 */
+    VLMUL_M8,     /* LMUL=8 */
+    VLMUL_RESERVED,
+    VLMUL_MF8,    /* LMUL=1/8 */
+    VLMUL_MF4,    /* LMUL=1/4 */
+    VLMUL_MF2,    /* LMUL=1/2 */
+} RISCVVlmul;
+
 typedef enum {
     OPC_ADD = 0x33,
     OPC_ADDI = 0x13,
@@ -271,6 +285,30 @@ typedef enum {
     /* Zicond: integer conditional operations */
     OPC_CZERO_EQZ = 0x0e005033,
     OPC_CZERO_NEZ = 0x0e007033,
+
+    /* V: Vector extension 1.0 */
+    OPC_VSETVLI = 0x57 | V_OPCFG,
+    OPC_VSETIVLI = 0xc0000057 | V_OPCFG,
+    OPC_VSETVL = 0x80000057 | V_OPCFG,
+
+    OPC_VLE8_V = 0x7 | V_UNIT_STRIDE,
+    OPC_VLE16_V = 0x5007 | V_UNIT_STRIDE,
+    OPC_VLE32_V = 0x6007 | V_UNIT_STRIDE,
+    OPC_VLE64_V = 0x7007 | V_UNIT_STRIDE,
+    OPC_VSE8_V = 0x27 | V_UNIT_STRIDE,
+    OPC_VSE16_V = 0x5027 | V_UNIT_STRIDE,
+    OPC_VSE32_V = 0x6027 | V_UNIT_STRIDE,
+    OPC_VSE64_V = 0x7027 | V_UNIT_STRIDE,
+
+    OPC_VL1RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
+    OPC_VL2RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
+    OPC_VL4RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
+    OPC_VL8RE64_V = 0x2007007 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
+
+    OPC_VS1R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(0),
+    OPC_VS2R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(1),
+    OPC_VS4R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(3),
+    OPC_VS8R_V = 0x2000027 | V_UNIT_STRIDE_WHOLE_REG | V_NF(7),
 } RISCVInsn;
 
 /*
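The vector opcode templates compose the fixed fields of the instruction word: the major opcode in bits 0-6, funct3 (the OP-V category or the load/store width) in bits 12-14, lumop in bits 20-24, vm in bit 25, and nf in bits 29-31. A standalone sketch (the dump() helper is invented here) that prints a few of the constants back out; the lumop/vm/nf columns are only meaningful for the loads and stores:

#include <stdint.h>
#include <stdio.h>

static void dump(const char *name, uint32_t insn)
{
    printf("%-10s opcode=%#04x funct3=%u lumop=%#x vm=%u nf=%u\n",
           name, insn & 0x7f, (insn >> 12) & 7,
           (insn >> 20) & 0x1f, (insn >> 25) & 1, (insn >> 29) & 7);
}

int main(void)
{
    dump("vsetvli",   0x57 | (0x7 << 12));                    /* OPC_VSETVLI */
    dump("vle64.v",   0x7007 | (0x0 << 20));                  /* OPC_VLE64_V */
    dump("vl2re64.v", 0x2007007 | (0x8 << 20) | (1u << 29));  /* OPC_VL2RE64_V */
    return 0;
}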
@@ -363,6 +401,35 @@ static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
     return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
 }
 
+/* Type-OPIVV/OPMVV/OPIVX/OPMVX, Vector load and store */
+
+static int32_t encode_v(RISCVInsn opc, TCGReg d, TCGReg s1,
+                        TCGReg s2, bool vm)
+{
+    return opc | (d & 0x1f) << 7 | (s1 & 0x1f) << 15 |
+           (s2 & 0x1f) << 20 | (vm << 25);
+}
+
+/* Vector vtype */
+
+static uint32_t encode_vtype(bool vta, bool vma,
+                             MemOp vsew, RISCVVlmul vlmul)
+{
+    return vma << 7 | vta << 6 | vsew << 3 | vlmul;
+}
+
+static int32_t encode_vset(RISCVInsn opc, TCGReg rd,
+                           TCGArg rs1, uint32_t vtype)
+{
+    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (vtype & 0x7ff) << 20;
+}
+
+static int32_t encode_vseti(RISCVInsn opc, TCGReg rd,
+                            uint32_t uimm, uint32_t vtype)
+{
+    return opc | (rd & 0x1f) << 7 | (uimm & 0x1f) << 15 | (vtype & 0x3ff) << 20;
+}
+
 /*
  * RISC-V instruction emitters
  */
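A worked example of the vtype encoders (standalone bit math, not part of the patch): SEW=64, LMUL=1, tail- and mask-agnostic gives vtype = 0xd8, and folding that into "vsetivli zero, 8, vtype" yields one fixed 32-bit instruction, which is exactly the kind of word the vset cache below stores.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    /* encode_vtype(true, true, MO_64, VLMUL_M1) */
    unsigned vma = 1, vta = 1, vsew = 3 /* MO_64 */, vlmul = 0 /* M1 */;
    uint32_t vtype = vma << 7 | vta << 6 | vsew << 3 | vlmul;
    assert(vtype == 0xd8);

    /* encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, 8, vtype) */
    uint32_t insn = (0xc0000057 | 0x7 << 12)    /* OPC_VSETIVLI */
                  | (0 & 0x1f) << 7             /* rd = zero */
                  | (8 & 0x1f) << 15            /* uimm = AVL = 8 */
                  | (vtype & 0x3ff) << 20;
    assert(insn == 0xcd847057);
    return 0;
}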
@@ -475,6 +542,38 @@ static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
     }
 }
 
+/*
+ * RISC-V vector instruction emitters
+ */
+
+typedef struct VsetCache {
+    uint32_t movi_insn;
+    uint32_t vset_insn;
+} VsetCache;
+
+static VsetCache riscv_vset_cache[3][4];
+
+static void set_vtype(TCGContext *s, TCGType type, MemOp vsew)
+{
+    const VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
+
+    s->riscv_cur_type = type;
+    s->riscv_cur_vsew = vsew;
+
+    if (p->movi_insn) {
+        tcg_out32(s, p->movi_insn);
+    }
+    tcg_out32(s, p->vset_insn);
+}
+
+static MemOp set_vtype_len(TCGContext *s, TCGType type)
+{
+    if (type != s->riscv_cur_type) {
+        set_vtype(s, type, MO_64);
+    }
+    return s->riscv_cur_vsew;
+}
+
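A standalone mock of the caching discipline (names shortened for the sketch; the real state lives in the new TCGContext fields riscv_cur_type/riscv_cur_vsew added elsewhere in this series): vsetvli is only re-emitted when the requested vector length changes, and a caller that needs a different SEW at the same length can compare against the returned previous vsew.

#include <assert.h>

enum { V64 = 3, V128, V256, TYPE_UNKNOWN };

static int cur_type = TYPE_UNKNOWN, cur_vsew = -1, emitted;

static void set_vtype(int type, int vsew)
{
    cur_type = type;
    cur_vsew = vsew;
    emitted++;              /* stands in for tcg_out32(s, vset_insn) */
}

static int set_vtype_len(int type)
{
    if (type != cur_type) {
        set_vtype(type, 3 /* MO_64 */);
    }
    return cur_vsew;
}

int main(void)
{
    set_vtype_len(V128);    /* first use: one vsetvli emitted */
    set_vtype_len(V128);    /* same length: cached, nothing emitted */
    assert(emitted == 1);
    set_vtype_len(V256);    /* length change: re-emitted */
    assert(emitted == 2);
    return 0;
}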
 /*
  * TCG intrinsics
  */
@@ -681,18 +780,101 @@ static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
     }
 }
 
+static void tcg_out_vec_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
+                             TCGReg addr, intptr_t offset)
+{
+    tcg_debug_assert(data >= TCG_REG_V0);
+    tcg_debug_assert(addr < TCG_REG_V0);
+
+    if (offset) {
+        tcg_debug_assert(addr != TCG_REG_ZERO);
+        if (offset == sextreg(offset, 0, 12)) {
+            tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_TMP0, addr, offset);
+        } else {
+            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, offset);
+            tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP0, addr);
+        }
+        addr = TCG_REG_TMP0;
+    }
+    tcg_out32(s, encode_v(opc, data, addr, 0, true));
+}
+
 static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                        TCGReg arg1, intptr_t arg2)
 {
-    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_LW : OPC_LD;
-    tcg_out_ldst(s, insn, arg, arg1, arg2);
+    RISCVInsn insn;
+
+    switch (type) {
+    case TCG_TYPE_I32:
+        tcg_out_ldst(s, OPC_LW, arg, arg1, arg2);
+        break;
+    case TCG_TYPE_I64:
+        tcg_out_ldst(s, OPC_LD, arg, arg1, arg2);
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        if (type >= riscv_lg2_vlenb) {
+            static const RISCVInsn whole_reg_ld[] = {
+                OPC_VL1RE64_V, OPC_VL2RE64_V, OPC_VL4RE64_V, OPC_VL8RE64_V
+            };
+            unsigned idx = type - riscv_lg2_vlenb;
+
+            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_ld));
+            insn = whole_reg_ld[idx];
+        } else {
+            static const RISCVInsn unit_stride_ld[] = {
+                OPC_VLE8_V, OPC_VLE16_V, OPC_VLE32_V, OPC_VLE64_V
+            };
+            MemOp prev_vsew = set_vtype_len(s, type);
+
+            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_ld));
+            insn = unit_stride_ld[prev_vsew];
+        }
+        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
+        break;
+    default:
+        g_assert_not_reached();
+    }
 }
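A worked example of the whole-register vs unit-stride split above, assuming a host with VLEN = 128 bits (riscv_lg2_vlenb == 4). Because TCG_TYPE_V64..TCG_TYPE_V256 double as log2 of the type size in bytes (3..5), the comparison against riscv_lg2_vlenb directly answers whether the type fills at least one whole vector register.

#include <assert.h>

int main(void)
{
    const int lg2_vlenb = 4;          /* VLEN = 128 bits = 16 bytes */
    const int v64 = 3, v128 = 4, v256 = 5;

    assert(!(v64 >= lg2_vlenb));                      /* unit-stride vle64.v */
    assert(v128 >= lg2_vlenb && v128 - lg2_vlenb == 0);   /* vl1re64.v */
    assert(v256 >= lg2_vlenb && v256 - lg2_vlenb == 1);   /* vl2re64.v */
    return 0;
}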
 
 static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                        TCGReg arg1, intptr_t arg2)
 {
-    RISCVInsn insn = type == TCG_TYPE_I32 ? OPC_SW : OPC_SD;
-    tcg_out_ldst(s, insn, arg, arg1, arg2);
+    RISCVInsn insn;
+
+    switch (type) {
+    case TCG_TYPE_I32:
+        tcg_out_ldst(s, OPC_SW, arg, arg1, arg2);
+        break;
+    case TCG_TYPE_I64:
+        tcg_out_ldst(s, OPC_SD, arg, arg1, arg2);
+        break;
+    case TCG_TYPE_V64:
+    case TCG_TYPE_V128:
+    case TCG_TYPE_V256:
+        if (type >= riscv_lg2_vlenb) {
+            static const RISCVInsn whole_reg_st[] = {
+                OPC_VS1R_V, OPC_VS2R_V, OPC_VS4R_V, OPC_VS8R_V
+            };
+            unsigned idx = type - riscv_lg2_vlenb;
+
+            tcg_debug_assert(idx < ARRAY_SIZE(whole_reg_st));
+            insn = whole_reg_st[idx];
+        } else {
+            static const RISCVInsn unit_stride_st[] = {
+                OPC_VSE8_V, OPC_VSE16_V, OPC_VSE32_V, OPC_VSE64_V
+            };
+            MemOp prev_vsew = set_vtype_len(s, type);
+
+            tcg_debug_assert(prev_vsew < ARRAY_SIZE(unit_stride_st));
+            insn = unit_stride_st[prev_vsew];
+        }
+        tcg_out_vec_ldst(s, insn, arg, arg1, arg2);
+        break;
+    default:
+        g_assert_not_reached();
+    }
 }
 
 static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
@@ -766,6 +948,23 @@ static void tcg_out_addsub2(TCGContext *s,
     }
 }
 
+static bool tcg_out_dup_vec(TCGContext *s, TCGType type, unsigned vece,
+                            TCGReg dst, TCGReg src)
+{
+    return false;
+}
+
+static bool tcg_out_dupm_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg dst, TCGReg base, intptr_t offset)
+{
+    return false;
+}
+
+static void tcg_out_dupi_vec(TCGContext *s, TCGType type, unsigned vece,
+                             TCGReg dst, int64_t arg)
+{
+}
+
 static const struct {
     RISCVInsn op;
     bool swap;
@@ -1104,12 +1303,19 @@ static void tcg_out_cltz(TCGContext *s, TCGType type, RISCVInsn insn,
     }
 }
 
+static void init_setting_vtype(TCGContext *s)
+{
+    s->riscv_cur_type = TCG_TYPE_COUNT;
+}
+
 static void tcg_out_call_int(TCGContext *s, const tcg_insn_unit *arg, bool tail)
 {
     TCGReg link = tail ? TCG_REG_ZERO : TCG_REG_RA;
     ptrdiff_t offset = tcg_pcrel_diff(s, arg);
     int ret;
 
+    init_setting_vtype(s);
+
     tcg_debug_assert((offset & 1) == 0);
     if (offset == sextreg(offset, 0, 20)) {
         /* short jump: -2097150 to 2097152 */
@@ -1247,6 +1453,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
         ldst->oi = oi;
         ldst->addrlo_reg = addr_reg;
 
+        init_setting_vtype(s);
+
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_AREG0, mask_ofs);
         tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, TCG_AREG0, table_ofs);
 
@@ -1308,6 +1516,8 @@ static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
         ldst->oi = oi;
         ldst->addrlo_reg = addr_reg;
 
+        init_setting_vtype(s);
+
         /* We are expecting alignment max 7, so we can always use andi. */
         tcg_debug_assert(a_mask == sextreg(a_mask, 0, 12));
         tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
@@ -1881,6 +2091,46 @@ static void tcg_out_op(TCGContext *s, TCGOpcode opc,
     }
 }
 
+static void tcg_out_vec_op(TCGContext *s, TCGOpcode opc,
+                           unsigned vecl, unsigned vece,
+                           const TCGArg args[TCG_MAX_OP_ARGS],
+                           const int const_args[TCG_MAX_OP_ARGS])
+{
+    TCGType type = vecl + TCG_TYPE_V64;
+    TCGArg a0, a1, a2;
+
+    a0 = args[0];
+    a1 = args[1];
+    a2 = args[2];
+
+    switch (opc) {
+    case INDEX_op_ld_vec:
+        tcg_out_ld(s, type, a0, a1, a2);
+        break;
+    case INDEX_op_st_vec:
+        tcg_out_st(s, type, a0, a1, a2);
+        break;
+    case INDEX_op_mov_vec: /* Always emitted via tcg_out_mov. */
+    case INDEX_op_dup_vec: /* Always emitted via tcg_out_dup_vec. */
+    default:
+        g_assert_not_reached();
+    }
+}
+
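The vecl argument arrives as TYPE - TCG_TYPE_V64, so the first line of tcg_out_vec_op just inverts that mapping; a standalone round-trip check (enum values duplicated here for the sketch):

#include <assert.h>

enum { TCG_TYPE_V64 = 3, TCG_TYPE_V128, TCG_TYPE_V256 };

int main(void)
{
    for (int type = TCG_TYPE_V64; type <= TCG_TYPE_V256; type++) {
        unsigned vecl = type - TCG_TYPE_V64;
        assert((int)(vecl + TCG_TYPE_V64) == type);
        assert(8 << vecl == 1 << type);   /* size in bytes: 8, 16, 32 */
    }
    return 0;
}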
+void tcg_expand_vec_op(TCGOpcode opc, TCGType type, unsigned vece,
+                       TCGArg a0, ...)
+{
+    g_assert_not_reached();
+}
+
+int tcg_can_emit_vec_op(TCGOpcode opc, TCGType type, unsigned vece)
+{
+    switch (opc) {
+    default:
+        return 0;
+    }
+}
+
 static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
 {
     switch (op) {
@@ -2020,6 +2270,10 @@ static TCGConstraintSetIndex tcg_target_op_def(TCGOpcode op)
     case INDEX_op_qemu_st_a64_i64:
         return C_O0_I2(rZ, r);
 
+    case INDEX_op_st_vec:
+        return C_O0_I2(v, r);
+    case INDEX_op_ld_vec:
+        return C_O1_I1(v, r);
     default:
         g_assert_not_reached();
     }
@@ -2093,7 +2347,65 @@ static void tcg_target_qemu_prologue(TCGContext *s)
 
 static void tcg_out_tb_start(TCGContext *s)
 {
-    /* nothing to do */
+    init_setting_vtype(s);
+}
+
+static bool vtype_check(unsigned vtype)
+{
+    unsigned long tmp;
+
+    /* vsetvl tmp, zero, vtype */
+    asm(".insn r 0x57, 7, 0x40, %0, zero, %1" : "=r"(tmp) : "r"(vtype));
+    return tmp != 0;
+}
+
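The .insn line hand-assembles vsetvl so the file builds even when the assembler's -march lacks V; below is a standalone check that opcode 0x57, funct3 7, funct7 0x40 really is vsetvl's encoding (registers picked arbitrarily). With rd != x0 and rs1 == x0, vsetvl requests AVL = VLMAX, and an unsupported vtype sets vill and forces vl, the value read back into tmp, to 0, which is what makes "tmp != 0" a usable capability probe.

#include <assert.h>
#include <stdint.h>

int main(void)
{
    unsigned rd = 10, rs1 = 0, rs2 = 11;   /* vsetvl a0, zero, a1 */
    uint32_t insn = 0x40u << 25 | rs2 << 20 | rs1 << 15
                  | 7u << 12 | rd << 7 | 0x57;
    assert(insn == 0x80b07557);
    return 0;
}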
+static void probe_frac_lmul_1(TCGType type, MemOp vsew)
+{
+    VsetCache *p = &riscv_vset_cache[type - TCG_TYPE_V64][vsew];
+    unsigned avl = tcg_type_size(type) >> vsew;
+    int lmul = type - riscv_lg2_vlenb;
+    unsigned vtype = encode_vtype(true, true, vsew, lmul & 7);
+    bool lmul_eq_avl = true;
+
+    /* Guaranteed by Zve64x. */
+    assert(lmul < 3);
+
+    /*
+     * For LMUL < -3, the host vector size is so large that TYPE
+     * is smaller than the minimum 1/8 fraction.
+     *
+     * For other fractional LMUL settings, implementations must
+     * support SEW settings between SEW_MIN and LMUL * ELEN, inclusive.
+     * So if ELEN = 64, LMUL = 1/2, then SEW will support e8, e16, e32,
+     * but e64 may not be supported. In other words, the hardware only
+     * guarantees SEW_MIN <= SEW <= LMUL * ELEN. Check.
+     */
+    if (lmul < 0 && (lmul < -3 || !vtype_check(vtype))) {
+        vtype = encode_vtype(true, true, vsew, VLMUL_M1);
+        lmul_eq_avl = false;
+    }
+
+    if (avl < 32) {
+        p->vset_insn = encode_vseti(OPC_VSETIVLI, TCG_REG_ZERO, avl, vtype);
+    } else if (lmul_eq_avl) {
+        /* rd != 0 and rs1 == 0 uses vlmax */
+        p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_TMP0, TCG_REG_ZERO, vtype);
+    } else {
+        p->movi_insn = encode_i(OPC_ADDI, TCG_REG_TMP0, TCG_REG_ZERO, avl);
+        p->vset_insn = encode_vset(OPC_VSETVLI, TCG_REG_ZERO, TCG_REG_TMP0, vtype);
+    }
+}
+
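A worked example for probe_frac_lmul_1, assuming VLEN = 128 (riscv_lg2_vlenb == 4): TCG_TYPE_V64 with 32-bit elements gives AVL = 8 >> 2 = 2 and LMUL = 3 - 4 = -1, i.e. mf2. The "lmul & 7" trick works because the fractional encodings sit exactly where two's complement puts the negative values:

#include <assert.h>

int main(void)
{
    assert((-1 & 7) == 7);   /* VLMUL_MF2 */
    assert((-2 & 7) == 6);   /* VLMUL_MF4 */
    assert((-3 & 7) == 5);   /* VLMUL_MF8 */

    /* avl = tcg_type_size(TCG_TYPE_V64) >> MO_32 */
    assert((8 >> 2) == 2);   /* < 32, so the vsetivli form is cached */
    return 0;
}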
+static void probe_frac_lmul(void)
+{
+    /* Match riscv_lg2_vlenb to TCG_TYPE_V64. */
+    QEMU_BUILD_BUG_ON(TCG_TYPE_V64 != 3);
+
+    for (TCGType t = TCG_TYPE_V64; t <= TCG_TYPE_V256; t++) {
+        for (MemOp e = MO_8; e <= MO_64; e++) {
+            probe_frac_lmul_1(t, e);
+        }
+    }
 }
 
 static void tcg_target_init(TCGContext *s)
@@ -2101,7 +2413,7 @@ static void tcg_target_init(TCGContext *s)
     tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
     tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
 
-    tcg_target_call_clobber_regs = -1u;
+    tcg_target_call_clobber_regs = -1;
     tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
     tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
     tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
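The -1u to -1 change is load-bearing: with TCG_TARGET_NB_REGS raised to 64 elsewhere in this series, TCGRegSet is a 64-bit set, and -1u would leave bits 32..63 (all of v0-v31) out of the call-clobber set. A standalone check of the two conversions:

#include <assert.h>
#include <stdint.h>

int main(void)
{
    uint64_t from_int  = (uint64_t)(int)-1;  /* sign-extends: all 64 bits */
    uint64_t from_uint = (uint64_t)-1u;      /* zero-extends: low 32 only */

    assert(from_int  == 0xffffffffffffffffull);
    assert(from_uint == 0x00000000ffffffffull);
    return 0;
}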
@@ -2123,6 +2435,32 @@ static void tcg_target_init(TCGContext *s)
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
     tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
+
+    if (cpuinfo & CPUINFO_ZVE64X) {
+        switch (riscv_lg2_vlenb) {
+        case TCG_TYPE_V64:
+            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
+            tcg_target_available_regs[TCG_TYPE_V128] = ALL_DVECTOR_REG_GROUPS;
+            tcg_target_available_regs[TCG_TYPE_V256] = ALL_QVECTOR_REG_GROUPS;
+            s->reserved_regs |= (~ALL_QVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
+            break;
+        case TCG_TYPE_V128:
+            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
+            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
+            tcg_target_available_regs[TCG_TYPE_V256] = ALL_DVECTOR_REG_GROUPS;
+            s->reserved_regs |= (~ALL_DVECTOR_REG_GROUPS & ALL_VECTOR_REGS);
+            break;
+        default:
+            /* Guaranteed by Zve64x. */
+            tcg_debug_assert(riscv_lg2_vlenb >= TCG_TYPE_V256);
+            tcg_target_available_regs[TCG_TYPE_V64] = ALL_VECTOR_REGS;
+            tcg_target_available_regs[TCG_TYPE_V128] = ALL_VECTOR_REGS;
+            tcg_target_available_regs[TCG_TYPE_V256] = ALL_VECTOR_REGS;
+            break;
+        }
+        tcg_regset_set_reg(s->reserved_regs, TCG_REG_V0);
+        probe_frac_lmul();
+    }
 }
 
 typedef struct {
|