@@ -104,8 +104,7 @@ static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg1,
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg);
 static void tcg_out_movi(TCGContext *s, TCGType type,
                          TCGReg ret, tcg_target_long arg);
-static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long)
-    __attribute__((unused));
+static void tcg_out_addi_ptr(TCGContext *s, TCGReg, TCGReg, tcg_target_long);
 static void tcg_out_exit_tb(TCGContext *s, uintptr_t arg);
 static void tcg_out_goto_tb(TCGContext *s, int which);
 static void tcg_out_op(TCGContext *s, TCGOpcode opc,
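
The forward declaration drops its __attribute__((unused)) marker: the new load_arg_ref() below becomes the first common-code caller of tcg_out_addi_ptr(), so the suppression for an as-yet-unused backend hook is no longer needed.
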
@@ -683,6 +682,38 @@ static void layout_arg_normal_n(TCGCumulativeArgs *cum,
     cum->arg_slot += n;
 }
 
+static void layout_arg_by_ref(TCGCumulativeArgs *cum, TCGHelperInfo *info)
+{
+    TCGCallArgumentLoc *loc = &info->in[cum->info_in_idx];
+    int n = 128 / TCG_TARGET_REG_BITS;
+
+    /* The first subindex carries the pointer. */
+    layout_arg_1(cum, info, TCG_CALL_ARG_BY_REF);
+
+    /*
+     * The callee is allowed to clobber memory associated with a
+     * structure passed by reference. Therefore we must make copies.
+     * Allocate space from "ref_slot", which will be adjusted to
+     * follow the parameters on the stack.
+     */
+    loc[0].ref_slot = cum->ref_slot;
+
+    /*
+     * Subsequent words also go into the reference slot, but
+     * do not accumulate into the regular arguments.
+     */
+    for (int i = 1; i < n; ++i) {
+        loc[i] = (TCGCallArgumentLoc){
+            .kind = TCG_CALL_ARG_BY_REF_N,
+            .arg_idx = cum->arg_idx,
+            .tmp_subindex = i,
+            .ref_slot = cum->ref_slot + i,
+        };
+    }
+    cum->info_in_idx += n - 1;  /* i=0 accounted for in layout_arg_1 */
+    cum->ref_slot += n;
+}
+
 static void init_call_layout(TCGHelperInfo *info)
 {
     int max_reg_slots = ARRAY_SIZE(tcg_target_call_iarg_regs);
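
To make the layout concrete: on a 64-bit host (TCG_TARGET_REG_BITS == 64) we get n == 2, so a single Int128 argument produces the following locations (slot numbers assume cum->ref_slot was 0 on entry; illustrative only):

    loc[0]: kind = TCG_CALL_ARG_BY_REF,   arg_slot from layout_arg_1, ref_slot = 0
    loc[1]: kind = TCG_CALL_ARG_BY_REF_N,                             ref_slot = 1

ref_slot thus reserves two contiguous stack words for the defensive copy, while only loc[0] consumes a regular argument slot.
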
@@ -718,6 +749,14 @@ static void init_call_layout(TCGHelperInfo *info)
     case TCG_CALL_RET_NORMAL:
         assert(info->nr_out <= ARRAY_SIZE(tcg_target_call_oarg_regs));
         break;
+    case TCG_CALL_RET_BY_REF:
+        /*
+         * Allocate the first argument to the output.
+         * We don't need to store this anywhere, just make it
+         * unavailable for use in the input loop below.
+         */
+        cum.arg_slot = 1;
+        break;
     default:
         qemu_build_not_reached();
     }
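
Note that cum.arg_slot = 1 simply consumes the first argument slot: no TCGCallArgumentLoc is recorded for it, so the input loop below starts laying out real arguments at slot 1, and tcg_reg_alloc_call() is free to load the return pointer into slot 0 just before the call.
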
@@ -796,6 +835,9 @@ static void init_call_layout(TCGHelperInfo *info)
         case TCG_CALL_ARG_NORMAL:
             layout_arg_normal_n(&cum, info, 128 / TCG_TARGET_REG_BITS);
             break;
+        case TCG_CALL_ARG_BY_REF:
+            layout_arg_by_ref(&cum, info);
+            break;
         default:
             qemu_build_not_reached();
         }
@@ -811,7 +853,39 @@ static void init_call_layout(TCGHelperInfo *info)
     assert(cum.info_in_idx <= ARRAY_SIZE(info->in));
     /* Validate the backend has enough argument space. */
     assert(cum.arg_slot <= max_reg_slots + max_stk_slots);
-    assert(cum.ref_slot <= max_stk_slots);
+
+    /*
+     * Relocate the "ref_slot" area to the end of the parameters.
+     * Minimizing this stack offset helps code size for x86,
+     * which has a signed 8-bit offset encoding.
+     */
+    if (cum.ref_slot != 0) {
+        int ref_base = 0;
+
+        if (cum.arg_slot > max_reg_slots) {
+            int align = __alignof(Int128) / sizeof(tcg_target_long);
+
+            ref_base = cum.arg_slot - max_reg_slots;
+            if (align > 1) {
+                ref_base = ROUND_UP(ref_base, align);
+            }
+        }
+        assert(ref_base + cum.ref_slot <= max_stk_slots);
+
+        if (ref_base != 0) {
+            for (int i = cum.info_in_idx - 1; i >= 0; --i) {
+                TCGCallArgumentLoc *loc = &info->in[i];
+                switch (loc->kind) {
+                case TCG_CALL_ARG_BY_REF:
+                case TCG_CALL_ARG_BY_REF_N:
+                    loc->ref_slot += ref_base;
+                    break;
+                default:
+                    break;
+                }
+            }
+        }
+    }
 }
 
 static int indirect_reg_alloc_order[ARRAY_SIZE(tcg_target_reg_alloc_order)];
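
A worked example of the relocation, with illustrative numbers: on a 64-bit host with max_reg_slots == 6 and cum.arg_slot == 8 (two argument slots spilled to the stack),

    ref_base = 8 - 6 = 2
    align    = __alignof(Int128) / sizeof(tcg_target_long) = 16 / 8 = 2
    ref_base = ROUND_UP(2, 2) = 2

and every BY_REF/BY_REF_N location has its ref_slot shifted up by 2, placing the copies immediately after the stacked parameters. If all arguments fit in registers, ref_base stays 0 and the copies start at the bottom of the outgoing stack area.
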
@@ -1740,6 +1814,8 @@ void tcg_gen_callN(void *func, TCGTemp *ret, int nargs, TCGTemp **args)
 
         switch (loc->kind) {
         case TCG_CALL_ARG_NORMAL:
+        case TCG_CALL_ARG_BY_REF:
+        case TCG_CALL_ARG_BY_REF_N:
             op->args[pi++] = temp_arg(ts);
             break;
 
@@ -4411,6 +4487,27 @@ static void load_arg_normal(TCGContext *s, const TCGCallArgumentLoc *l,
     }
 }
 
+static void load_arg_ref(TCGContext *s, int arg_slot, TCGReg ref_base,
+                         intptr_t ref_off, TCGRegSet *allocated_regs)
+{
+    TCGReg reg;
+    int stk_slot = arg_slot - ARRAY_SIZE(tcg_target_call_iarg_regs);
+
+    if (stk_slot < 0) {
+        reg = tcg_target_call_iarg_regs[arg_slot];
+        tcg_reg_free(s, reg, *allocated_regs);
+        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
+        tcg_regset_set_reg(*allocated_regs, reg);
+    } else {
+        reg = tcg_reg_alloc(s, tcg_target_available_regs[TCG_TYPE_PTR],
+                            *allocated_regs, 0, false);
+        tcg_out_addi_ptr(s, reg, ref_base, ref_off);
+        tcg_out_st(s, TCG_TYPE_PTR, reg, TCG_REG_CALL_STACK,
+                   TCG_TARGET_CALL_STACK_OFFSET
+                   + stk_slot * sizeof(tcg_target_long));
+    }
+}
+
 static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
 {
     const int nb_oargs = TCGOP_CALLO(op);
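
In the new load_arg_ref(), the address ref_base + ref_off is materialized into the argument's home: when arg_slot still maps to an argument register, that register is freed and loaded directly; otherwise a scratch register is allocated, the address formed there, and the result spilled to the outgoing stack slot.
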
@@ -4434,6 +4531,16 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
         case TCG_CALL_ARG_EXTEND_S:
             load_arg_normal(s, loc, ts, &allocated_regs);
             break;
+        case TCG_CALL_ARG_BY_REF:
+            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
+            load_arg_ref(s, loc->arg_slot, TCG_REG_CALL_STACK,
+                         TCG_TARGET_CALL_STACK_OFFSET
+                         + loc->ref_slot * sizeof(tcg_target_long),
+                         &allocated_regs);
+            break;
+        case TCG_CALL_ARG_BY_REF_N:
+            load_arg_stk(s, loc->ref_slot, ts, allocated_regs);
+            break;
         default:
             g_assert_not_reached();
         }
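
The two BY_REF cases split the work: every piece of the value is first copied to its ref_slot home via load_arg_stk(), and the first piece additionally publishes the address of that copy in the real argument slot. For one Int128 argument on a 64-bit host the outgoing frame ends up as (offsets illustrative):

    stack[ref_slot + 0] = first 64-bit word   (TCG_CALL_ARG_BY_REF)
    stack[ref_slot + 1] = second 64-bit word  (TCG_CALL_ARG_BY_REF_N)
    arg_slot            = &stack[ref_slot]
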
@@ -4465,6 +4572,19 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
         save_globals(s, allocated_regs);
     }
 
+    /*
+     * If the ABI passes a pointer to the returned struct as the first
+     * argument, load that now. Pass a pointer to the output home slot.
+     */
+    if (info->out_kind == TCG_CALL_RET_BY_REF) {
+        TCGTemp *ts = arg_temp(op->args[0]);
+
+        if (!ts->mem_allocated) {
+            temp_allocate_frame(s, ts);
+        }
+        load_arg_ref(s, 0, ts->mem_base->reg, ts->mem_offset, &allocated_regs);
+    }
+
     tcg_out_call(s, tcg_call_func(op), info);
 
     /* Assign output registers and emit moves if needed. */
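
Conceptually this mirrors the C ABI's hidden struct-return pointer. For a host ABI where Int128 is returned by reference, a helper call that in C reads

    Int128 r = helper_foo(env, a);

is emitted as if it were

    helper_foo(&r, env, a);

with &r, the address of the output temp's stack home, loaded into argument slot 0 (the slot reserved in init_call_layout() above). helper_foo here is a stand-in name for illustration, not an actual helper.
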
@@ -4481,6 +4601,15 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
             ts->mem_coherent = 0;
         }
         break;
+
+    case TCG_CALL_RET_BY_REF:
+        /* The callee has performed a write through the reference. */
+        for (i = 0; i < nb_oargs; i++) {
+            TCGTemp *ts = arg_temp(op->args[i]);
+            ts->val_type = TEMP_VAL_MEM;
+        }
+        break;
+
     default:
         g_assert_not_reached();
     }
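
Marking the output temps TEMP_VAL_MEM records that their current value now lives only in the stack home the callee wrote through the reference; no output registers are read, and any stale register contents for those temps are forgotten.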