@@ -29,11 +29,32 @@ static void gen_gvec_fn3_qc(uint32_t rd_ofs, uint32_t rn_ofs, uint32_t rm_ofs,
 {
     TCGv_ptr qc_ptr = tcg_temp_new_ptr();
 
+    tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
     tcg_gen_addi_ptr(qc_ptr, tcg_env, offsetof(CPUARMState, vfp.qc));
     tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, qc_ptr,
                        opr_sz, max_sz, 0, fn);
 }
 
+void gen_gvec_sqdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                         uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3_ptr * const fns[2] = {
+        gen_helper_neon_sqdmulh_h, gen_helper_neon_sqdmulh_s
+    };
+    tcg_debug_assert(vece >= 1 && vece <= 2);
+    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
+void gen_gvec_sqrdmulh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3_ptr * const fns[2] = {
+        gen_helper_neon_sqrdmulh_h, gen_helper_neon_sqrdmulh_s
+    };
+    tcg_debug_assert(vece >= 1 && vece <= 2);
+    gen_gvec_fn3_qc(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, fns[vece - 1]);
+}
+
 void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                           uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
 {
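
The two new wrappers index fns[vece - 1] because Neon defines SQDMULH/SQRDMULH only for halfword and word elements; the assert makes the MO_16/MO_32 restriction explicit. As a reviewer aid, here is a scalar model of one 16-bit SQRDMULH lane, assuming the architectural definition (doubling multiply, round, return the high half, saturate) rather than quoting the helper source:

    #include <stdint.h>

    /* SQRDMULH on one int16_t lane: (2*a*b + 0x8000) >> 16, saturated.
       The only input pair that can overflow is a == b == INT16_MIN. */
    static inline int16_t sqrdmulh16(int16_t a, int16_t b, int *qc)
    {
        int64_t p = ((int64_t)a * b * 2 + 0x8000) >> 16;
        if (p > INT16_MAX) {
            p = INT16_MAX;
            *qc = 1;           /* sticky saturation flag, like vfp.qc */
        }
        return p;
    }

SQDMULH is the same computation without the rounding constant.
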
@@ -933,21 +954,17 @@ void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
 /* CMTST : test is "if (X & Y != 0)". */
 static void gen_cmtst_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
 {
-    tcg_gen_and_i32(d, a, b);
-    tcg_gen_negsetcond_i32(TCG_COND_NE, d, d, tcg_constant_i32(0));
+    tcg_gen_negsetcond_i32(TCG_COND_TSTNE, d, a, b);
 }
 
 void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
 {
-    tcg_gen_and_i64(d, a, b);
-    tcg_gen_negsetcond_i64(TCG_COND_NE, d, d, tcg_constant_i64(0));
+    tcg_gen_negsetcond_i64(TCG_COND_TSTNE, d, a, b);
 }
 
 static void gen_cmtst_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
 {
-    tcg_gen_and_vec(vece, d, a, b);
-    tcg_gen_dupi_vec(vece, a, 0);
-    tcg_gen_cmp_vec(TCG_COND_NE, vece, d, d, a);
+    tcg_gen_cmp_vec(TCG_COND_TSTNE, vece, d, a, b);
 }
 
 void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
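
For review, the semantics being relied on here: TCG_COND_TSTNE is the "test" condition, true when (arg1 & arg2) != 0, so the separate AND (and, for vectors, the dup of zero) folds into a single negsetcond/cmp operation. A minimal scalar model of CMTST with that condition (plain C, not the TCG API):

    #include <stdint.h>

    /* CMTST per element: all-ones when (a & b) != 0, else zero.
       negsetcond(TSTNE) computes exactly -((a & b) != 0). */
    static inline uint32_t cmtst32(uint32_t a, uint32_t b)
    {
        return -(uint32_t)((a & b) != 0);
    }
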
@@ -1217,21 +1234,113 @@ void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
     tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
 }
 
-static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_gvec_srshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3 * const fns[] = {
+        gen_helper_gvec_srshl_b, gen_helper_gvec_srshl_h,
+        gen_helper_gvec_srshl_s, gen_helper_gvec_srshl_d,
+    };
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_gvec_urshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3 * const fns[] = {
+        gen_helper_gvec_urshl_b, gen_helper_gvec_urshl_h,
+        gen_helper_gvec_urshl_s, gen_helper_gvec_urshl_d,
+    };
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_sqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3_ptr * const fns[] = {
+        gen_helper_neon_sqshl_b, gen_helper_neon_sqshl_h,
+        gen_helper_neon_sqshl_s, gen_helper_neon_sqshl_d,
+    };
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+                       opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_uqshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3_ptr * const fns[] = {
+        gen_helper_neon_uqshl_b, gen_helper_neon_uqshl_h,
+        gen_helper_neon_uqshl_s, gen_helper_neon_uqshl_d,
+    };
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+                       opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_sqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3_ptr * const fns[] = {
+        gen_helper_neon_sqrshl_b, gen_helper_neon_sqrshl_h,
+        gen_helper_neon_sqrshl_s, gen_helper_neon_sqrshl_d,
+    };
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+                       opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_neon_uqrshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static gen_helper_gvec_3_ptr * const fns[] = {
+        gen_helper_neon_uqrshl_b, gen_helper_neon_uqrshl_h,
+        gen_helper_neon_uqrshl_s, gen_helper_neon_uqrshl_d,
+    };
+    tcg_debug_assert(vece <= MO_64);
+    tcg_gen_gvec_3_ptr(rd_ofs, rn_ofs, rm_ofs, tcg_env,
+                       opr_sz, max_sz, 0, fns[vece]);
+}
+
+void gen_uqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+    uint64_t max = MAKE_64BIT_MASK(0, 8 << esz);
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    tcg_gen_add_i64(tmp, a, b);
+    tcg_gen_umin_i64(res, tmp, tcg_constant_i64(max));
+    tcg_gen_xor_i64(tmp, tmp, res);
+    tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_uqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_add_i64(t, a, b);
+    tcg_gen_movcond_i64(TCG_COND_LTU, res, t, a,
+                        tcg_constant_i64(UINT64_MAX), t);
+    tcg_gen_xor_i64(t, t, res);
+    tcg_gen_or_i64(qc, qc, t);
+}
+
+static void gen_uqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                           TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec x = tcg_temp_new_vec_matching(t);
     tcg_gen_add_vec(vece, x, a, b);
     tcg_gen_usadd_vec(vece, t, a, b);
-    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
-    tcg_gen_or_vec(vece, sat, sat, x);
+    tcg_gen_xor_vec(vece, x, x, t);
+    tcg_gen_or_vec(vece, qc, qc, x);
 }
 
 void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                        uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
 {
     static const TCGOpcode vecop_list[] = {
-        INDEX_op_usadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+        INDEX_op_usadd_vec, INDEX_op_add_vec, 0
     };
     static const GVecGen4 ops[4] = {
         { .fniv = gen_uqadd_vec,
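
The QC idiom used by all of the new saturating helpers: compute the wrapping result and the saturating result, XOR them, and OR the difference into the qc vector, which therefore becomes nonzero exactly when some lane saturated. This replaces the old cmp_vec against the saturated value, which is why INDEX_op_cmp_vec drops out of the vecop lists. A scalar model of gen_uqadd_bhs under that reading (an illustrative sketch, not the QEMU API):

    #include <stdint.h>

    /* Unsigned saturating add of two (8 << esz)-bit elements, esz in {0,1,2},
       held zero-extended in uint64_t.  *qc goes nonzero iff the lane clamped. */
    static inline uint64_t uqadd_bhs(uint64_t a, uint64_t b, int esz, uint64_t *qc)
    {
        uint64_t max = (1ull << (8 << esz)) - 1;  /* MAKE_64BIT_MASK(0, 8 << esz) */
        uint64_t sum = a + b;                     /* cannot wrap at 64 bits */
        uint64_t res = sum < max ? sum : max;     /* umin */
        *qc |= sum ^ res;                         /* nonzero iff clamped */
        return res;
    }
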
@@ -1250,30 +1359,68 @@ void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
           .opt_opc = vecop_list,
           .vece = MO_32 },
         { .fniv = gen_uqadd_vec,
+          .fni8 = gen_uqadd_d,
           .fno = gen_helper_gvec_uqadd_d,
           .write_aofs = true,
           .opt_opc = vecop_list,
           .vece = MO_64 },
     };
+
+    tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
     tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                    rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
 }
 
-static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_sqadd_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+    int64_t max = MAKE_64BIT_MASK(0, (8 << esz) - 1);
+    int64_t min = -1ll - max;
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    tcg_gen_add_i64(tmp, a, b);
+    tcg_gen_smin_i64(res, tmp, tcg_constant_i64(max));
+    tcg_gen_smax_i64(res, res, tcg_constant_i64(min));
+    tcg_gen_xor_i64(tmp, tmp, res);
+    tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_sqadd_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+
+    tcg_gen_add_i64(t0, a, b);
+
+    /* Compute signed overflow indication into T1 */
+    tcg_gen_xor_i64(t1, a, b);
+    tcg_gen_xor_i64(t2, t0, a);
+    tcg_gen_andc_i64(t1, t2, t1);
+
+    /* Compute saturated value into T2 */
+    tcg_gen_sari_i64(t2, a, 63);
+    tcg_gen_xori_i64(t2, t2, INT64_MAX);
+
+    tcg_gen_movcond_i64(TCG_COND_LT, res, t1, tcg_constant_i64(0), t2, t0);
+    tcg_gen_xor_i64(t0, t0, res);
+    tcg_gen_or_i64(qc, qc, t0);
+}
+
+static void gen_sqadd_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                           TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec x = tcg_temp_new_vec_matching(t);
     tcg_gen_add_vec(vece, x, a, b);
     tcg_gen_ssadd_vec(vece, t, a, b);
-    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
-    tcg_gen_or_vec(vece, sat, sat, x);
+    tcg_gen_xor_vec(vece, x, x, t);
+    tcg_gen_or_vec(vece, qc, qc, x);
 }
 
 void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                        uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
 {
     static const TCGOpcode vecop_list[] = {
-        INDEX_op_ssadd_vec, INDEX_op_cmp_vec, INDEX_op_add_vec, 0
+        INDEX_op_ssadd_vec, INDEX_op_add_vec, 0
     };
     static const GVecGen4 ops[4] = {
         { .fniv = gen_sqadd_vec,
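
gen_sqadd_d has no wider type to clamp in, so it uses the standard bit trick: signed addition overflows iff the operands agree in sign and the result disagrees, i.e. (t0 ^ a) & ~(a ^ b) has its sign bit set; the saturated value is INT64_MAX or INT64_MIN according to a's sign. Scalar model (illustrative):

    #include <stdint.h>

    static inline int64_t sqadd_d(int64_t a, int64_t b, uint64_t *qc)
    {
        int64_t t0 = (int64_t)((uint64_t)a + (uint64_t)b); /* wrapped sum */
        int64_t ovf = (t0 ^ a) & ~(a ^ b);     /* sign bit set iff overflow */
        int64_t sat = (a >> 63) ^ INT64_MAX;   /* MAX if a >= 0, else MIN */
        int64_t res = ovf < 0 ? sat : t0;      /* movcond(LT) */
        *qc |= (uint64_t)(t0 ^ res);
        return res;
    }
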
@@ -1292,30 +1439,53 @@ void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
           .write_aofs = true,
           .vece = MO_32 },
         { .fniv = gen_sqadd_vec,
+          .fni8 = gen_sqadd_d,
           .fno = gen_helper_gvec_sqadd_d,
           .opt_opc = vecop_list,
           .write_aofs = true,
           .vece = MO_64 },
     };
+
+    tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
     tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                    rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
 }
 
-static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_uqsub_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    tcg_gen_sub_i64(tmp, a, b);
+    tcg_gen_smax_i64(res, tmp, tcg_constant_i64(0));
+    tcg_gen_xor_i64(tmp, tmp, res);
+    tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_uqsub_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_sub_i64(t, a, b);
+    tcg_gen_movcond_i64(TCG_COND_LTU, res, a, b, tcg_constant_i64(0), t);
+    tcg_gen_xor_i64(t, t, res);
+    tcg_gen_or_i64(qc, qc, t);
+}
+
+static void gen_uqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                           TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec x = tcg_temp_new_vec_matching(t);
     tcg_gen_sub_vec(vece, x, a, b);
     tcg_gen_ussub_vec(vece, t, a, b);
-    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
-    tcg_gen_or_vec(vece, sat, sat, x);
+    tcg_gen_xor_vec(vece, x, x, t);
+    tcg_gen_or_vec(vece, qc, qc, x);
 }
 
 void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                        uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
 {
     static const TCGOpcode vecop_list[] = {
-        INDEX_op_ussub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+        INDEX_op_ussub_vec, INDEX_op_sub_vec, 0
     };
     static const GVecGen4 ops[4] = {
         { .fniv = gen_uqsub_vec,
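
In gen_uqsub_d the saturated result is selected directly: if a < b unsigned, the true difference is negative and clamps to zero; otherwise the wrapped difference is exact. Scalar model:

    #include <stdint.h>

    static inline uint64_t uqsub_d(uint64_t a, uint64_t b, uint64_t *qc)
    {
        uint64_t t = a - b;                /* wrapped difference */
        uint64_t res = a < b ? 0 : t;      /* movcond(LTU) */
        *qc |= t ^ res;                    /* nonzero iff underflowed */
        return res;
    }
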
@@ -1334,30 +1504,68 @@ void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
           .write_aofs = true,
           .vece = MO_32 },
         { .fniv = gen_uqsub_vec,
+          .fni8 = gen_uqsub_d,
           .fno = gen_helper_gvec_uqsub_d,
           .opt_opc = vecop_list,
           .write_aofs = true,
           .vece = MO_64 },
     };
+
+    tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
     tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                    rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
 }
 
-static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec sat,
+void gen_sqsub_bhs(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b, MemOp esz)
+{
+    int64_t max = MAKE_64BIT_MASK(0, (8 << esz) - 1);
+    int64_t min = -1ll - max;
+    TCGv_i64 tmp = tcg_temp_new_i64();
+
+    tcg_gen_sub_i64(tmp, a, b);
+    tcg_gen_smin_i64(res, tmp, tcg_constant_i64(max));
+    tcg_gen_smax_i64(res, res, tcg_constant_i64(min));
+    tcg_gen_xor_i64(tmp, tmp, res);
+    tcg_gen_or_i64(qc, qc, tmp);
+}
+
+void gen_sqsub_d(TCGv_i64 res, TCGv_i64 qc, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t0 = tcg_temp_new_i64();
+    TCGv_i64 t1 = tcg_temp_new_i64();
+    TCGv_i64 t2 = tcg_temp_new_i64();
+
+    tcg_gen_sub_i64(t0, a, b);
+
+    /* Compute signed overflow indication into T1 */
+    tcg_gen_xor_i64(t1, a, b);
+    tcg_gen_xor_i64(t2, t0, a);
+    tcg_gen_and_i64(t1, t1, t2);
+
+    /* Compute saturated value into T2 */
+    tcg_gen_sari_i64(t2, a, 63);
+    tcg_gen_xori_i64(t2, t2, INT64_MAX);
+
+    tcg_gen_movcond_i64(TCG_COND_LT, res, t1, tcg_constant_i64(0), t2, t0);
+    tcg_gen_xor_i64(t0, t0, res);
+    tcg_gen_or_i64(qc, qc, t0);
+}
+
+static void gen_sqsub_vec(unsigned vece, TCGv_vec t, TCGv_vec qc,
                           TCGv_vec a, TCGv_vec b)
 {
     TCGv_vec x = tcg_temp_new_vec_matching(t);
     tcg_gen_sub_vec(vece, x, a, b);
     tcg_gen_sssub_vec(vece, t, a, b);
-    tcg_gen_cmp_vec(TCG_COND_NE, vece, x, x, t);
-    tcg_gen_or_vec(vece, sat, sat, x);
+    tcg_gen_xor_vec(vece, x, x, t);
+    tcg_gen_or_vec(vece, qc, qc, x);
 }
 
 void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                        uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
 {
     static const TCGOpcode vecop_list[] = {
-        INDEX_op_sssub_vec, INDEX_op_cmp_vec, INDEX_op_sub_vec, 0
+        INDEX_op_sssub_vec, INDEX_op_sub_vec, 0
     };
     static const GVecGen4 ops[4] = {
         { .fniv = gen_sqsub_vec,
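
gen_sqsub_d mirrors gen_sqadd_d with the overflow predicate adjusted for subtraction: a - b overflows iff the operands differ in sign and the result's sign differs from a's, hence (a ^ b) & (t0 ^ a) (a plain AND where the add case used ANDC). Scalar model:

    #include <stdint.h>

    static inline int64_t sqsub_d(int64_t a, int64_t b, uint64_t *qc)
    {
        int64_t t0 = (int64_t)((uint64_t)a - (uint64_t)b); /* wrapped difference */
        int64_t ovf = (a ^ b) & (t0 ^ a);      /* sign bit set iff overflow */
        int64_t sat = (a >> 63) ^ INT64_MAX;   /* saturate toward a's sign */
        int64_t res = ovf < 0 ? sat : t0;
        *qc |= (uint64_t)(t0 ^ res);
        return res;
    }
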
@@ -1376,11 +1584,14 @@ void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
           .write_aofs = true,
           .vece = MO_32 },
         { .fniv = gen_sqsub_vec,
+          .fni8 = gen_sqsub_d,
           .fno = gen_helper_gvec_sqsub_d,
           .opt_opc = vecop_list,
           .write_aofs = true,
           .vece = MO_64 },
     };
+
+    tcg_debug_assert(opr_sz <= sizeof_field(CPUARMState, vfp.qc));
     tcg_gen_gvec_4(rd_ofs, offsetof(CPUARMState, vfp.qc),
                    rn_ofs, rm_ofs, opr_sz, max_sz, &ops[vece]);
 }
@@ -1670,3 +1881,435 @@ void gen_gvec_uminp(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
     tcg_debug_assert(vece <= MO_32);
     tcg_gen_gvec_3_ool(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, 0, fns[vece]);
 }
+
+static void gen_shadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_and_i64(t, a, b);
+    tcg_gen_vec_sar8i_i64(a, a, 1);
+    tcg_gen_vec_sar8i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_add8_i64(d, a, b);
+    tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_shadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_and_i64(t, a, b);
+    tcg_gen_vec_sar16i_i64(a, a, 1);
+    tcg_gen_vec_sar16i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_add16_i64(d, a, b);
+    tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_shadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_and_i32(t, a, b);
+    tcg_gen_sari_i32(a, a, 1);
+    tcg_gen_sari_i32(b, b, 1);
+    tcg_gen_andi_i32(t, t, 1);
+    tcg_gen_add_i32(d, a, b);
+    tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_shadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_and_vec(vece, t, a, b);
+    tcg_gen_sari_vec(vece, a, a, 1);
+    tcg_gen_sari_vec(vece, b, b, 1);
+    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+    tcg_gen_add_vec(vece, d, a, b);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+void gen_gvec_shadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen3 g[] = {
+        { .fni8 = gen_shadd8_i64,
+          .fniv = gen_shadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_shadd16_i64,
+          .fniv = gen_shadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_shadd_i32,
+          .fniv = gen_shadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+    };
+    tcg_debug_assert(vece <= MO_32);
+    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_uhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_and_i64(t, a, b);
+    tcg_gen_vec_shr8i_i64(a, a, 1);
+    tcg_gen_vec_shr8i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_add8_i64(d, a, b);
+    tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_uhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_and_i64(t, a, b);
+    tcg_gen_vec_shr16i_i64(a, a, 1);
+    tcg_gen_vec_shr16i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_add16_i64(d, a, b);
+    tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_uhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_and_i32(t, a, b);
+    tcg_gen_shri_i32(a, a, 1);
+    tcg_gen_shri_i32(b, b, 1);
+    tcg_gen_andi_i32(t, t, 1);
+    tcg_gen_add_i32(d, a, b);
+    tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_uhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_and_vec(vece, t, a, b);
+    tcg_gen_shri_vec(vece, a, a, 1);
+    tcg_gen_shri_vec(vece, b, b, 1);
+    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+    tcg_gen_add_vec(vece, d, a, b);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+void gen_gvec_uhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen3 g[] = {
+        { .fni8 = gen_uhadd8_i64,
+          .fniv = gen_uhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_uhadd16_i64,
+          .fniv = gen_uhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_uhadd_i32,
+          .fniv = gen_uhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+    };
+    tcg_debug_assert(vece <= MO_32);
+    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_shsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_andc_i64(t, b, a);
+    tcg_gen_vec_sar8i_i64(a, a, 1);
+    tcg_gen_vec_sar8i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_sub8_i64(d, a, b);
+    tcg_gen_vec_sub8_i64(d, d, t);
+}
+
+static void gen_shsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_andc_i64(t, b, a);
+    tcg_gen_vec_sar16i_i64(a, a, 1);
+    tcg_gen_vec_sar16i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_sub16_i64(d, a, b);
+    tcg_gen_vec_sub16_i64(d, d, t);
+}
+
+static void gen_shsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_andc_i32(t, b, a);
+    tcg_gen_sari_i32(a, a, 1);
+    tcg_gen_sari_i32(b, b, 1);
+    tcg_gen_andi_i32(t, t, 1);
+    tcg_gen_sub_i32(d, a, b);
+    tcg_gen_sub_i32(d, d, t);
+}
+
+static void gen_shsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_andc_vec(vece, t, b, a);
+    tcg_gen_sari_vec(vece, a, a, 1);
+    tcg_gen_sari_vec(vece, b, b, 1);
+    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+    tcg_gen_sub_vec(vece, d, a, b);
+    tcg_gen_sub_vec(vece, d, d, t);
+}
+
+void gen_gvec_shsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_sub_vec, 0
+    };
+    static const GVecGen3 g[4] = {
+        { .fni8 = gen_shsub8_i64,
+          .fniv = gen_shsub_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_shsub16_i64,
+          .fniv = gen_shsub_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_shsub_i32,
+          .fniv = gen_shsub_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_uhsub8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_andc_i64(t, b, a);
+    tcg_gen_vec_shr8i_i64(a, a, 1);
+    tcg_gen_vec_shr8i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_sub8_i64(d, a, b);
+    tcg_gen_vec_sub8_i64(d, d, t);
+}
+
+static void gen_uhsub16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_andc_i64(t, b, a);
+    tcg_gen_vec_shr16i_i64(a, a, 1);
+    tcg_gen_vec_shr16i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_sub16_i64(d, a, b);
+    tcg_gen_vec_sub16_i64(d, d, t);
+}
+
+static void gen_uhsub_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_andc_i32(t, b, a);
+    tcg_gen_shri_i32(a, a, 1);
+    tcg_gen_shri_i32(b, b, 1);
+    tcg_gen_andi_i32(t, t, 1);
+    tcg_gen_sub_i32(d, a, b);
+    tcg_gen_sub_i32(d, d, t);
+}
+
+static void gen_uhsub_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_andc_vec(vece, t, b, a);
+    tcg_gen_shri_vec(vece, a, a, 1);
+    tcg_gen_shri_vec(vece, b, b, 1);
+    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+    tcg_gen_sub_vec(vece, d, a, b);
+    tcg_gen_sub_vec(vece, d, d, t);
+}
+
+void gen_gvec_uhsub(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_sub_vec, 0
+    };
+    static const GVecGen3 g[4] = {
+        { .fni8 = gen_uhsub8_i64,
+          .fniv = gen_uhsub_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_uhsub16_i64,
+          .fniv = gen_uhsub_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_uhsub_i32,
+          .fniv = gen_uhsub_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_srhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_or_i64(t, a, b);
+    tcg_gen_vec_sar8i_i64(a, a, 1);
+    tcg_gen_vec_sar8i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_add8_i64(d, a, b);
+    tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_srhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_or_i64(t, a, b);
+    tcg_gen_vec_sar16i_i64(a, a, 1);
+    tcg_gen_vec_sar16i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_add16_i64(d, a, b);
+    tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_srhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_or_i32(t, a, b);
+    tcg_gen_sari_i32(a, a, 1);
+    tcg_gen_sari_i32(b, b, 1);
+    tcg_gen_andi_i32(t, t, 1);
+    tcg_gen_add_i32(d, a, b);
+    tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_srhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_or_vec(vece, t, a, b);
+    tcg_gen_sari_vec(vece, a, a, 1);
+    tcg_gen_sari_vec(vece, b, b, 1);
+    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+    tcg_gen_add_vec(vece, d, a, b);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+void gen_gvec_srhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_sari_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen3 g[] = {
+        { .fni8 = gen_srhadd8_i64,
+          .fniv = gen_srhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_srhadd16_i64,
+          .fniv = gen_srhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_srhadd_i32,
+          .fniv = gen_srhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
+
+static void gen_urhadd8_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_or_i64(t, a, b);
+    tcg_gen_vec_shr8i_i64(a, a, 1);
+    tcg_gen_vec_shr8i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_8, 1));
+    tcg_gen_vec_add8_i64(d, a, b);
+    tcg_gen_vec_add8_i64(d, d, t);
+}
+
+static void gen_urhadd16_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b)
+{
+    TCGv_i64 t = tcg_temp_new_i64();
+
+    tcg_gen_or_i64(t, a, b);
+    tcg_gen_vec_shr16i_i64(a, a, 1);
+    tcg_gen_vec_shr16i_i64(b, b, 1);
+    tcg_gen_andi_i64(t, t, dup_const(MO_16, 1));
+    tcg_gen_vec_add16_i64(d, a, b);
+    tcg_gen_vec_add16_i64(d, d, t);
+}
+
+static void gen_urhadd_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b)
+{
+    TCGv_i32 t = tcg_temp_new_i32();
+
+    tcg_gen_or_i32(t, a, b);
+    tcg_gen_shri_i32(a, a, 1);
+    tcg_gen_shri_i32(b, b, 1);
+    tcg_gen_andi_i32(t, t, 1);
+    tcg_gen_add_i32(d, a, b);
+    tcg_gen_add_i32(d, d, t);
+}
+
+static void gen_urhadd_vec(unsigned vece, TCGv_vec d, TCGv_vec a, TCGv_vec b)
+{
+    TCGv_vec t = tcg_temp_new_vec_matching(d);
+
+    tcg_gen_or_vec(vece, t, a, b);
+    tcg_gen_shri_vec(vece, a, a, 1);
+    tcg_gen_shri_vec(vece, b, b, 1);
+    tcg_gen_and_vec(vece, t, t, tcg_constant_vec_matching(d, vece, 1));
+    tcg_gen_add_vec(vece, d, a, b);
+    tcg_gen_add_vec(vece, d, d, t);
+}
+
+void gen_gvec_urhadd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
+                     uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz)
+{
+    static const TCGOpcode vecop_list[] = {
+        INDEX_op_shri_vec, INDEX_op_add_vec, 0
+    };
+    static const GVecGen3 g[] = {
+        { .fni8 = gen_urhadd8_i64,
+          .fniv = gen_urhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_8 },
+        { .fni8 = gen_urhadd16_i64,
+          .fniv = gen_urhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_16 },
+        { .fni4 = gen_urhadd_i32,
+          .fniv = gen_urhadd_vec,
+          .opt_opc = vecop_list,
+          .vece = MO_32 },
+    };
+    assert(vece <= MO_32);
+    tcg_gen_gvec_3(rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz, &g[vece]);
+}
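
All of the halving helpers above rely on the same identity, which avoids any wider intermediate: (x + y) >> 1 == (x >> 1) + (y >> 1) + (x & y & 1), valid for both signed and unsigned shifts; the rounding variants replace the carry-in with (x | y) & 1, and subtraction uses the borrow (~x & y) & 1 (hence the andc above). One signed byte lane as a checkable scalar model:

    #include <stdint.h>

    /* SHADD: signed halving add, no wider intermediate needed. */
    static inline int8_t shadd8(int8_t a, int8_t b)
    {
        return (a >> 1) + (b >> 1) + (a & b & 1);
    }

    /* SRHADD: rounds up, so the carry-in is (a | b) & 1. */
    static inline int8_t srhadd8(int8_t a, int8_t b)
    {
        return (a >> 1) + (b >> 1) + ((a | b) & 1);
    }

    /* SHSUB: the borrow term is (~a & b) & 1. */
    static inline int8_t shsub8(int8_t a, int8_t b)
    {
        return (a >> 1) - (b >> 1) - (~a & b & 1);
    }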