diff --git a/src/mono/mono/mini/method-to-ir.c b/src/mono/mono/mini/method-to-ir.c
index 5975236dfa98e2..d3b0637119d072 100644
--- a/src/mono/mono/mini/method-to-ir.c
+++ b/src/mono/mono/mini/method-to-ir.c
@@ -7547,34 +7547,27 @@ mono_method_to_ir (MonoCompile *cfg, MonoMethod *method, MonoBasicBlock *start_b
// SwiftSelf, SwiftError, and SwiftIndirectResult are special cases where we need to preserve the class information for the codegen to handle them correctly.
if (mono_type_is_struct (ptype) && !(klass == swift_self || klass == swift_error || klass == swift_indirect_result)) {
SwiftPhysicalLowering lowered_swift_struct = mono_marshal_get_swift_physical_lowering (ptype, FALSE);
+ // Create a new local variable to store the base address of the struct
+ MonoInst *struct_base_address = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
+ CHECK_ARG (idx_param);
+ NEW_ARGLOADA (cfg, struct_base_address, idx_param);
+ MONO_ADD_INS (cfg->cbb, struct_base_address);
if (!lowered_swift_struct.by_reference) {
- // Create a new local variable to store the base address of the struct
- MonoInst *struct_base_address = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
- CHECK_ARG (idx_param);
- NEW_ARGLOADA (cfg, struct_base_address, idx_param);
- MONO_ADD_INS (cfg->cbb, struct_base_address);
-
+ // Load the lowered elements of the struct
for (uint32_t idx_lowered = 0; idx_lowered < lowered_swift_struct.num_lowered_elements; ++idx_lowered) {
- MonoInst *lowered_arg = NULL;
- // Load the lowered elements of the struct
- lowered_arg = mini_emit_memory_load (cfg, lowered_swift_struct.lowered_elements [idx_lowered], struct_base_address, lowered_swift_struct.offsets [idx_lowered], 0);
+ MonoInst *lowered_arg = mini_emit_memory_load (cfg, lowered_swift_struct.lowered_elements [idx_lowered], struct_base_address, lowered_swift_struct.offsets [idx_lowered], 0);
*sp++ = lowered_arg;
- ++new_param_count;
g_array_append_val (new_params, lowered_swift_struct.lowered_elements [idx_lowered]);
+ ++new_param_count;
}
} else {
// For structs that cannot be lowered, we change the argument to byref type
- ptype = mono_class_get_byref_type (mono_defaults.typed_reference_class);
- // Load the address of the struct
- MonoInst *struct_base_address = mono_compile_create_var (cfg, mono_get_int_type (), OP_LOCAL);
- CHECK_ARG (idx_param);
- NEW_ARGLOADA (cfg, struct_base_address, idx_param);
- MONO_ADD_INS (cfg->cbb, struct_base_address);
*sp++ = struct_base_address;
+ ptype = mono_class_get_byref_type (klass);
- ++new_param_count;
g_array_append_val (new_params, ptype);
+ ++new_param_count;
}
} else {
// Copy over non-struct arguments
diff --git a/src/mono/mono/mini/mini-amd64.c b/src/mono/mono/mini/mini-amd64.c
index e69292962494f2..9d547cf2f4cd91 100644
--- a/src/mono/mono/mini/mini-amd64.c
+++ b/src/mono/mono/mini/mini-amd64.c
@@ -615,6 +615,52 @@ add_valuetype_win64 (MonoMethodSignature *signature, ArgInfo *arg_info, MonoType
#endif /* TARGET_WIN32 */
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+static void
+add_return_valuetype_swiftcall (ArgInfo *ainfo, MonoType *type, guint32 *gr, guint32 *fr)
+{
+ guint32 align;
+ int size = mini_type_stack_size_full (type, &align, TRUE);
+ SwiftPhysicalLowering lowered_swift_struct = mono_marshal_get_swift_physical_lowering (type, FALSE);
+	// Structs that cannot be lowered are passed by reference
+ if (lowered_swift_struct.by_reference) {
+ ainfo->storage = ArgValuetypeAddrInIReg;
+ /*
+ * On x64, Swift calls expect the return buffer to be passed in RAX.
+	 * However, since the Mono register allocator could assign RAX to a different value,
+ * the R10 register is used instead and before the native call,
+ * the value is moved from R10 to RAX (`amd64_handle_swift_return_buffer_reg`).
+ */
+ ainfo->reg = AMD64_R10;
+ return;
+ }
+
+ g_assert (lowered_swift_struct.num_lowered_elements > 0 && lowered_swift_struct.num_lowered_elements <= 4);
+
+ ainfo->storage = ArgSwiftValuetypeLoweredRet;
+ ainfo->nregs = lowered_swift_struct.num_lowered_elements;
+ ainfo->arg_size = size;
+
+ // Record the lowered elements of the struct
+ for (uint32_t idx_lowered_elem = 0; idx_lowered_elem < lowered_swift_struct.num_lowered_elements; ++idx_lowered_elem) {
+ MonoTypeEnum lowered_elem = lowered_swift_struct.lowered_elements [idx_lowered_elem]->type;
+ if (lowered_elem == MONO_TYPE_R4) {
+ ainfo->pair_storage [idx_lowered_elem] = ArgInFloatSSEReg;
+ ainfo->pair_regs [idx_lowered_elem] = float_return_regs [(*fr)++];
+ } else if (lowered_elem == MONO_TYPE_R8) {
+ ainfo->pair_storage [idx_lowered_elem] = ArgInDoubleSSEReg;
+ ainfo->pair_regs [idx_lowered_elem] = float_return_regs [(*fr)++];
+ } else {
+ ainfo->pair_storage [idx_lowered_elem] = ArgInIReg;
+ ainfo->pair_regs [idx_lowered_elem] = return_regs [(*gr)++];
+ }
+ ainfo->offsets [idx_lowered_elem] = GUINT32_TO_UINT8 (lowered_swift_struct.offsets [idx_lowered_elem]);
+ }
+
+ return;
+}
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
+
static void
add_valuetype (MonoMethodSignature *sig, ArgInfo *ainfo, MonoType *type,
gboolean is_return,
@@ -876,7 +922,7 @@ get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
cinfo->nargs = n;
cinfo->gsharedvt = mini_is_gsharedvt_variable_signature (sig);
cinfo->swift_error_index = -1;
- cinfo->swift_indirect_result_index = -1;
+ cinfo->need_swift_return_buffer = FALSE;
gr = 0;
fr = 0;
@@ -931,8 +977,27 @@ get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
case MONO_TYPE_TYPEDBYREF: {
guint32 tmp_gr = 0, tmp_fr = 0, tmp_stacksize = 0;
- add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
- g_assert (cinfo->ret.storage != ArgInIReg);
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ if (mono_method_signature_has_ext_callconv (sig, MONO_EXT_CALLCONV_SWIFTCALL)) {
+ if (sig->pinvoke)
+ add_return_valuetype_swiftcall (&cinfo->ret, ret_type, &tmp_gr, &tmp_fr);
+ else
+ add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+
+ if (cinfo->ret.storage == ArgValuetypeAddrInIReg) {
+ /*
+ * We need to set this even when sig->pinvoke is FALSE, because the `cinfo` gets copied to the
+ * `cfg->arch` on the first pass. However, later in `amd64_handle_swift_return_buffer_reg` we
+			 * restrict the Swift return buffer handling to P/Invoke calls only.
+ */
+ cinfo->need_swift_return_buffer = TRUE;
+ }
+ } else
+#endif
+ {
+ add_valuetype (sig, &cinfo->ret, ret_type, TRUE, &tmp_gr, &tmp_fr, &tmp_stacksize);
+ }
+ g_assert (cinfo->ret.storage != ArgInIReg);
break;
}
case MONO_TYPE_VAR:
@@ -971,8 +1036,15 @@ get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
add_general (&gr, &stack_size, cinfo->args + 0);
if (ret_storage == ArgValuetypeAddrInIReg || ret_storage == ArgGsharedvtVariableInReg) {
- add_general (&gr, &stack_size, &cinfo->ret);
- cinfo->ret.storage = ret_storage;
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+			// When a Swift struct is returned by reference, the R10 register is used to hold the return buffer.
+ if (!(cinfo->need_swift_return_buffer && cinfo->ret.reg == AMD64_R10 &&
+ sig->pinvoke && mono_method_signature_has_ext_callconv (sig, MONO_EXT_CALLCONV_SWIFTCALL)))
+#endif
+ {
+ add_general (&gr, &stack_size, &cinfo->ret);
+ cinfo->ret.storage = ret_storage;
+ }
}
}
@@ -1019,7 +1091,7 @@ get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
MonoClass *swift_error_ptr = mono_class_create_ptr (m_class_get_this_arg (swift_error));
MonoClass *klass = mono_class_from_mono_type_internal (sig->params [i]);
if (klass == swift_indirect_result)
- cinfo->swift_indirect_result_index = i;
+ cinfo->need_swift_return_buffer = TRUE;
if ((klass == swift_self || klass == swift_indirect_result) && sig->pinvoke) {
guint32 size = mini_type_stack_size_full (m_class_get_byval_arg (klass), NULL, sig->pinvoke && !sig->marshalling_disabled);
@@ -1137,6 +1209,8 @@ arg_need_temp (ArgInfo *ainfo)
// Value types using one register doesn't need temp.
if (ainfo->storage == ArgValuetypeInReg && ainfo->nregs > 1)
return ainfo->nregs * sizeof (host_mgreg_t);
+ else if (ainfo->storage == ArgSwiftValuetypeLoweredRet)
+ return ainfo->arg_size;
return 0;
}
@@ -1183,24 +1257,56 @@ arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
g_assert (arg_need_temp (ainfo));
- host_mgreg_t *dest_cast = (host_mgreg_t*)dest;
- /* Reconstruct the value type */
- for (int k = 0; k < ainfo->nregs; k++) {
- int storage_type = ainfo->pair_storage [k];
- int reg_storage = ainfo->pair_regs [k];
- switch (storage_type) {
- case ArgInIReg:
- *dest_cast = ccontext->gregs [reg_storage];
- break;
- case ArgInFloatSSEReg:
- case ArgInDoubleSSEReg:
- *(double*)dest_cast = ccontext->fregs [reg_storage];
- break;
- default:
- g_assert_not_reached ();
+ switch (ainfo->storage) {
+ case ArgValuetypeInReg: {
+ host_mgreg_t *dest_cast = (host_mgreg_t*)dest;
+ /* Reconstruct the value type */
+ for (int k = 0; k < ainfo->nregs; k++) {
+ int storage_type = ainfo->pair_storage [k];
+ int reg_storage = ainfo->pair_regs [k];
+ switch (storage_type) {
+ case ArgInIReg:
+ *dest_cast = ccontext->gregs [reg_storage];
+ break;
+ case ArgInFloatSSEReg:
+ case ArgInDoubleSSEReg:
+ *(double*)dest_cast = ccontext->fregs [reg_storage];
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ dest_cast++;
+ }
+ break;
+ }
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ case ArgSwiftValuetypeLoweredRet: {
+ char *storage = (char*)dest;
+ for (int k = 0; k < ainfo->nregs; k++) {
+ int storage_type = ainfo->pair_storage [k];
+ int reg_storage = ainfo->pair_regs [k];
+ switch (storage_type) {
+ case ArgInIReg:
+ *(gsize*)(storage + ainfo->offsets [k]) = ccontext->gregs [reg_storage];
+ break;
+ case ArgInFloatSSEReg:
+ *(float*)(storage + ainfo->offsets [k]) = *(float*)&ccontext->fregs [reg_storage];
+ break;
+ case ArgInDoubleSSEReg:
+ *(double*)(storage + ainfo->offsets [k]) = ccontext->fregs [reg_storage];
+ break;
+ default:
+ g_assert_not_reached ();
+ }
}
- dest_cast++;
+ break;
}
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
+ default:
+ g_assert_not_reached ();
+ }
+
+
}
static void
@@ -1762,6 +1868,7 @@ mono_arch_fill_argument_info (MonoCompile *cfg)
cfg->ret->inst_offset = -1;
break;
case ArgNone:
+ case ArgSwiftValuetypeLoweredRet:
break;
default:
g_assert_not_reached ();
@@ -1893,6 +2000,19 @@ mono_arch_allocate_vars (MonoCompile *cfg)
cfg->ret->inst_offset = - offset;
}
break;
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ case ArgSwiftValuetypeLoweredRet:
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = cfg->frame_reg;
+ if (cfg->arch.omit_fp) {
+ cfg->ret->inst_offset = offset;
+ offset += cinfo->ret.arg_size;
+ } else {
+ offset += cinfo->ret.arg_size;
+ cfg->ret->inst_offset = - offset;
+ }
+ break;
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
default:
g_assert_not_reached ();
}
@@ -2088,8 +2208,10 @@ mono_arch_create_vars (MonoCompile *cfg)
cfg->lmf_ir = TRUE;
}
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
if (cinfo->swift_error_index >= 0)
cfg->args [cinfo->swift_error_index]->flags |= MONO_INST_VOLATILE;
+#endif
}
static void
@@ -2476,6 +2598,7 @@ mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
switch (cinfo->ret.storage) {
case ArgValuetypeInReg:
+ case ArgSwiftValuetypeLoweredRet:
if (cinfo->ret.pair_storage [0] == ArgInIReg && cinfo->ret.pair_storage [1] == ArgNone) {
/*
* Tell the JIT to use a more efficient calling convention: call using
@@ -4361,7 +4484,8 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
case OP_VCALL2_REG:
case OP_VCALL2_MEMBASE:
cinfo = get_call_info (cfg->mempool, ((MonoCallInst*)ins)->signature);
- if (cinfo->ret.storage == ArgValuetypeInReg) {
+ switch (cinfo->ret.storage) {
+ case ArgValuetypeInReg: {
MonoInst *loc = cfg->arch.vret_addr_loc;
/* Load the destination address */
@@ -4385,6 +4509,36 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
NOT_IMPLEMENTED;
}
}
+ break;
+ }
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ case ArgSwiftValuetypeLoweredRet: {
+ MonoInst *loc = cfg->arch.vret_addr_loc;
+ int i;
+
+ /* Load the destination address */
+ g_assert (loc->opcode == OP_REGOFFSET);
+ amd64_mov_reg_membase (code, AMD64_RBX, loc->inst_basereg, loc->inst_offset, sizeof(gpointer));
+
+ // Reassemble the returned struct
+ for (i = 0; i < cinfo->ret.nregs; i ++) {
+ switch (cinfo->ret.pair_storage [i]) {
+ case ArgInIReg:
+ amd64_mov_membase_reg (code, AMD64_RBX, cinfo->ret.offsets [i], cinfo->ret.pair_regs [i], sizeof (target_mgreg_t));
+ break;
+ case ArgInFloatSSEReg:
+ amd64_movss_membase_reg (code, AMD64_RBX, cinfo->ret.offsets [i], cinfo->ret.pair_regs [i]);
+ break;
+ case ArgInDoubleSSEReg:
+ amd64_movsd_membase_reg (code, AMD64_RBX, cinfo->ret.offsets [i], cinfo->ret.pair_regs [i]);
+ break;
+ default:
+ NOT_IMPLEMENTED;
+ }
+ }
+ break;
+ }
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
}
break;
}
@@ -4641,24 +4795,26 @@ amd64_handle_varargs_call (MonoCompile *cfg, guint8 *code, MonoCallInst *call, g
#endif
}
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
static guint8*
-amd64_handle_swift_indirect_result (MonoCompile *cfg, guint8 *code, MonoCallInst *call)
+amd64_handle_swift_return_buffer_reg (MonoCompile *cfg, guint8 *code, MonoCallInst *call)
{
+ MonoMethodSignature *sig = call->signature;
// Ideally, this should be in mono_arch_emit_prolog, but RAX may be used for the call, and it is required to free RAX.
- if (mono_method_signature_has_ext_callconv (cfg->method->signature, MONO_EXT_CALLCONV_SWIFTCALL) &&
- cfg->arch.cinfo->swift_indirect_result_index > -1) {
+ if (mono_method_signature_has_ext_callconv (sig, MONO_EXT_CALLCONV_SWIFTCALL) && sig->pinvoke &&
+ cfg->arch.cinfo->need_swift_return_buffer) {
MonoInst *ins = (MonoInst*)call;
- if (ins->sreg1 == AMD64_RAX) {
- amd64_mov_reg_reg (code, AMD64_R11, AMD64_RAX, 8);
+ if (ins->sreg1 == SWIFT_RETURN_BUFFER_REG) {
+ amd64_mov_reg_reg (code, AMD64_R11, SWIFT_RETURN_BUFFER_REG, 8);
ins->sreg1 = AMD64_R11;
}
- amd64_mov_reg_reg (code, AMD64_RAX, AMD64_R10, 8);
+ amd64_mov_reg_reg (code, SWIFT_RETURN_BUFFER_REG, AMD64_R10, 8);
}
return code;
}
-
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
void
mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
@@ -5646,7 +5802,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
amd64_mov_membase_reg (code, AMD64_RSP, -8, AMD64_RAX, 8);
code = amd64_handle_varargs_call (cfg, code, call, FALSE);
#ifdef MONO_ARCH_HAVE_SWIFTCALL
- code = amd64_handle_swift_indirect_result (cfg, code, call);
+ code = amd64_handle_swift_return_buffer_reg (cfg, code, call);
#endif
amd64_jump_membase (code, AMD64_RSP, -8);
#endif
@@ -5674,7 +5830,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
code = amd64_handle_varargs_call (cfg, code, call, FALSE);
#ifdef MONO_ARCH_HAVE_SWIFTCALL
- code = amd64_handle_swift_indirect_result (cfg, code, call);
+ code = amd64_handle_swift_return_buffer_reg (cfg, code, call);
#endif
code = emit_call (cfg, call, code, MONO_JIT_ICALL_ZeroIsReserved);
ins->flags |= MONO_INST_GC_CALLSITE;
@@ -5697,7 +5853,7 @@ mono_arch_output_basic_block (MonoCompile *cfg, MonoBasicBlock *bb)
code = amd64_handle_varargs_call (cfg, code, call, TRUE);
#ifdef MONO_ARCH_HAVE_SWIFTCALL
- code = amd64_handle_swift_indirect_result (cfg, code, call);
+ code = amd64_handle_swift_return_buffer_reg (cfg, code, call);
#endif
amd64_call_reg (code, ins->sreg1);
ins->flags |= MONO_INST_GC_CALLSITE;
@@ -8244,8 +8400,8 @@ MONO_RESTORE_WARNING
#ifdef MONO_ARCH_HAVE_SWIFTCALL
if (mono_method_signature_has_ext_callconv (sig, MONO_EXT_CALLCONV_SWIFTCALL) &&
cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED &&
- cfg->arch.cinfo->swift_indirect_result_index > -1) {
- amd64_mov_reg_reg (code, AMD64_R10, AMD64_RAX, 8);
+ cfg->arch.cinfo->need_swift_return_buffer) {
+ amd64_mov_reg_reg (code, AMD64_R10, SWIFT_RETURN_BUFFER_REG, 8);
}
#endif
diff --git a/src/mono/mono/mini/mini-amd64.h b/src/mono/mono/mini/mini-amd64.h
index a8c3f175c3e9ae..5cbd2d8a606612 100644
--- a/src/mono/mono/mini/mini-amd64.h
+++ b/src/mono/mono/mini/mini-amd64.h
@@ -176,6 +176,10 @@ struct sigcontext {
* reproduceable results for benchmarks */
#define MONO_ARCH_CODE_ALIGNMENT 32
+#if defined(TARGET_OSX) || defined(TARGET_APPLE_MOBILE)
+#define MONO_ARCH_HAVE_SWIFTCALL 1
+#endif
+
struct MonoLMF {
/*
* The rsp field points to the stack location where the caller ip is saved.
@@ -235,17 +239,27 @@ static const AMD64_XMM_Reg_No float_return_regs [] = { AMD64_XMM0 };
#else
#define PARAM_REGS 6
#define FLOAT_PARAM_REGS 8
-#define RETURN_REGS 2
-#define FLOAT_RETURN_REGS 2
static const AMD64_Reg_No param_regs [] = {AMD64_RDI, AMD64_RSI, AMD64_RDX,
AMD64_RCX, AMD64_R8, AMD64_R9};
-
static const AMD64_XMM_Reg_No float_param_regs[] = {AMD64_XMM0, AMD64_XMM1, AMD64_XMM2,
AMD64_XMM3, AMD64_XMM4, AMD64_XMM5,
AMD64_XMM6, AMD64_XMM7};
+#ifndef MONO_ARCH_HAVE_SWIFTCALL
+#define RETURN_REGS 2
+#define FLOAT_RETURN_REGS 2
+
static const AMD64_Reg_No return_regs [] = {AMD64_RAX, AMD64_RDX};
+static const AMD64_XMM_Reg_No float_return_regs [] = {AMD64_XMM0, AMD64_XMM1};
+#else
+#define SWIFT_RETURN_BUFFER_REG AMD64_RAX
+#define RETURN_REGS 4
+#define FLOAT_RETURN_REGS 4
+
+static const AMD64_Reg_No return_regs [] = { AMD64_RAX, AMD64_RDX, AMD64_RCX, AMD64_R8 };
+static const AMD64_XMM_Reg_No float_return_regs [] = { AMD64_XMM0, AMD64_XMM1, AMD64_XMM2, AMD64_XMM3 };
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
#endif
#define CTX_REGS 2
@@ -302,6 +316,8 @@ typedef enum {
/* Variable sized gsharedvt argument passed/returned by addr */
ArgGsharedvtVariableInReg,
ArgSwiftError,
+ /* Swift lowered struct returned in multiple int and float registers. */
+ ArgSwiftValuetypeLoweredRet,
ArgNone /* only in pair_storage */
} ArgStorage;
@@ -310,14 +326,21 @@ typedef struct {
guint8 reg;
ArgStorage storage : 8;
- /* Only if storage == ArgValuetypeInReg */
+ /* Only if storage == ArgValuetypeInReg/ArgSwiftValuetypeLoweredRet */
+#ifndef MONO_ARCH_HAVE_SWIFTCALL
ArgStorage pair_storage [2];
guint8 pair_regs [2];
+#else
+ ArgStorage pair_storage [4]; // The last 2 entries are only used for ArgSwiftValuetypeLoweredRet
+ guint8 pair_regs [4];
+ /* Only if storage == ArgSwiftValuetypeLoweredRet */
+ guint16 offsets [4];
+#endif
/* The size of each pair (bytes) */
int pair_size [2];
int nregs;
- /* Only if storage == ArgOnStack */
- int arg_size; // Bytes, will always be rounded up/aligned to 8 byte boundary
+ /* Only if storage == ArgOnStack/ArgSwiftValuetypeLoweredRet */
+ int arg_size; // Bytes, when on stack, will always be rounded up/aligned to 8 byte boundary
// Size in bytes for small arguments
int byte_arg_size;
guint8 pass_empty_struct : 1; // Set in scenarios when empty structs needs to be represented as argument.
@@ -330,7 +353,7 @@ struct CallInfo {
guint32 reg_usage;
guint32 freg_usage;
gint32 swift_error_index;
- gint32 swift_indirect_result_index;
+ gboolean need_swift_return_buffer;
gboolean need_stack_align;
gboolean gsharedvt;
/* The index of the vret arg in the argument list */
@@ -494,9 +517,6 @@ typedef struct {
// can pass context to generics or interfaces?
#define MONO_ARCH_HAVE_VOLATILE_NON_PARAM_REGISTER 1
-#if defined(TARGET_OSX) || defined(TARGET_APPLE_MOBILE)
-#define MONO_ARCH_HAVE_SWIFTCALL 1
-#endif
void
mono_amd64_patch (unsigned char* code, gpointer target);
diff --git a/src/mono/mono/mini/mini-arm64.c b/src/mono/mono/mini/mini-arm64.c
index b20423940228a8..50106c3c62e12e 100644
--- a/src/mono/mono/mini/mini-arm64.c
+++ b/src/mono/mono/mini/mini-arm64.c
@@ -1637,6 +1637,51 @@ is_hfa (MonoType *t, int *out_nfields, int *out_esize, int *field_offsets)
return TRUE;
}
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+static void
+add_return_valuetype_swiftcall (CallInfo *cinfo, ArgInfo *ainfo, MonoType *type)
+{
+ guint32 align;
+ int size = mini_type_stack_size_full (type, &align, TRUE);
+ SwiftPhysicalLowering lowered_swift_struct = mono_marshal_get_swift_physical_lowering (type, FALSE);
+	// Structs that cannot be lowered are passed by reference
+ if (lowered_swift_struct.by_reference) {
+ ainfo->storage = ArgVtypeByRef;
+ return;
+ }
+
+ g_assert (lowered_swift_struct.num_lowered_elements > 0 && lowered_swift_struct.num_lowered_elements <= 4);
+
+ ainfo->storage = ArgSwiftVtypeLoweredRet;
+ ainfo->nregs = lowered_swift_struct.num_lowered_elements;
+ ainfo->size = size;
+
+ // Record the lowered elements of the struct
+ for (uint32_t idx_lowered_elem = 0; idx_lowered_elem < lowered_swift_struct.num_lowered_elements; ++idx_lowered_elem) {
+ MonoTypeEnum lowered_elem = lowered_swift_struct.lowered_elements [idx_lowered_elem]->type;
+ if (lowered_elem == MONO_TYPE_R4) {
+ ainfo->struct_storage [idx_lowered_elem] = ArgInFRegR4;
+ ++cinfo->fr;
+ } else if (lowered_elem == MONO_TYPE_R8) {
+ ainfo->struct_storage [idx_lowered_elem] = ArgInFReg;
+ ++cinfo->fr;
+ } else {
+ ainfo->struct_storage [idx_lowered_elem] = ArgInIReg;
+ ++cinfo->gr;
+ }
+ ainfo->offsets [idx_lowered_elem] = GUINT32_TO_UINT8 (lowered_swift_struct.offsets [idx_lowered_elem]);
+ }
+ /*
+ * Verify that we didn't exceed the number of available registers.
+ * This should never happen because we are lowering the struct to a maximum of 4 registers
+ * and we only do the lowering here for the return value.
+ */
+ g_assert (cinfo->fr <= FP_PARAM_REGS);
+ g_assert (cinfo->gr <= PARAM_REGS);
+ return;
+}
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
+
static void
add_valuetype (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t, gboolean is_return)
{
@@ -1673,7 +1718,7 @@ add_valuetype (CallInfo *cinfo, ArgInfo *ainfo, MonoType *t, gboolean is_return)
ainfo->size = size;
ainfo->esize = esize;
for (i = 0; i < nfields; ++i)
- ainfo->foffsets [i] = GINT_TO_UINT8 (field_offsets [i]);
+ ainfo->offsets [i] = GINT_TO_UINT8 (field_offsets [i]);
cinfo->fr += ainfo->nregs;
} else {
ainfo->nfregs_to_skip = FP_PARAM_REGS > cinfo->fr ? FP_PARAM_REGS - cinfo->fr : 0;
@@ -1833,8 +1878,16 @@ get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
cinfo->vararg = sig->call_convention == MONO_CALL_VARARG;
#endif
- /* Return value */
- add_param (cinfo, &cinfo->ret, sig->ret, TRUE);
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ // Handle Swift struct lowering in function returns
+ if (sig->pinvoke && mono_method_signature_has_ext_callconv (sig, MONO_EXT_CALLCONV_SWIFTCALL) && mono_type_is_struct (sig->ret)) {
+ add_return_valuetype_swiftcall (cinfo, &cinfo->ret, sig->ret);
+ } else
+#endif
+ {
+ /* Return value */
+ add_param (cinfo, &cinfo->ret, sig->ret, TRUE);
+ }
if (cinfo->ret.storage == ArgVtypeByRef)
cinfo->ret.reg = ARMREG_R8;
/* Reset state */
@@ -1920,7 +1973,7 @@ get_call_info (MonoMemPool *mp, MonoMethodSignature *sig)
static int
arg_need_temp (ArgInfo *ainfo)
{
- if (ainfo->storage == ArgHFA && ainfo->esize == 4)
+ if ((ainfo->storage == ArgHFA && ainfo->esize == 4) || ainfo->storage == ArgSwiftVtypeLoweredRet)
return ainfo->size;
return 0;
}
@@ -1928,31 +1981,31 @@ arg_need_temp (ArgInfo *ainfo)
static gpointer
arg_get_storage (CallContext *ccontext, ArgInfo *ainfo)
{
- switch (ainfo->storage) {
- case ArgVtypeInIRegs:
- case ArgInIReg:
- if (ainfo->reg == ARMREG_R20)
- return &ccontext->gregs [PARAM_REGS + 1];
- else
- return &ccontext->gregs [ainfo->reg];
- case ArgInFReg:
- case ArgInFRegR4:
- case ArgHFA:
- return &ccontext->fregs [ainfo->reg];
- case ArgOnStack:
- case ArgOnStackR4:
- case ArgOnStackR8:
- case ArgVtypeOnStack:
- return ccontext->stack + ainfo->offset;
- case ArgVtypeByRefOnStack:
- return *(gpointer*)(ccontext->stack + ainfo->offset);
- case ArgVtypeByRef:
- return (gpointer) ccontext->gregs [ainfo->reg];
- case ArgSwiftError:
- return &ccontext->gregs [PARAM_REGS + 2];
- default:
- g_error ("Arg storage type not yet supported");
- }
+ switch (ainfo->storage) {
+ case ArgVtypeInIRegs:
+ case ArgInIReg:
+ if (ainfo->reg == ARMREG_R20)
+ return &ccontext->gregs [PARAM_REGS + 1];
+ else
+ return &ccontext->gregs [ainfo->reg];
+ case ArgInFReg:
+ case ArgInFRegR4:
+ case ArgHFA:
+ return &ccontext->fregs [ainfo->reg];
+ case ArgOnStack:
+ case ArgOnStackR4:
+ case ArgOnStackR8:
+ case ArgVtypeOnStack:
+ return ccontext->stack + ainfo->offset;
+ case ArgVtypeByRefOnStack:
+ return *(gpointer*)(ccontext->stack + ainfo->offset);
+ case ArgVtypeByRef:
+ return (gpointer) ccontext->gregs [ainfo->reg];
+ case ArgSwiftError:
+ return &ccontext->gregs [PARAM_REGS + 2];
+ default:
+ g_error ("Arg storage type not yet supported");
+ }
}
static void
@@ -1960,10 +2013,40 @@ arg_get_val (CallContext *ccontext, ArgInfo *ainfo, gpointer dest)
{
g_assert (arg_need_temp (ainfo));
- float *dest_float = (float*)dest;
- for (int k = 0; k < ainfo->nregs; k++) {
- *dest_float = *(float*)&ccontext->fregs [ainfo->reg + k];
- dest_float++;
+ switch (ainfo->storage) {
+ case ArgHFA: {
+ float *dest_float = (float*)dest;
+ for (int k = 0; k < ainfo->nregs; k++) {
+ *dest_float = *(float*)&ccontext->fregs [ainfo->reg + k];
+ dest_float++;
+ }
+ break;
+ }
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ case ArgSwiftVtypeLoweredRet: {
+ int i;
+ int gr = 0, fr = 0; // We can start from 0 since we are handling only returns
+ char *storage = (char*)dest;
+ for (i = 0; i < ainfo->nregs; ++i) {
+ switch (ainfo->struct_storage [i]) {
+ case ArgInIReg:
+ *(gsize*)(storage + ainfo->offsets [i]) = ccontext->gregs [gr++];
+ break;
+ case ArgInFReg:
+ *(double*)(storage + ainfo->offsets [i]) = ccontext->fregs [fr++];
+ break;
+ case ArgInFRegR4:
+ *(float*)(storage + ainfo->offsets [i]) = *(float*)&ccontext->fregs [fr++];
+ break;
+ default:
+ g_assert_not_reached ();
+ }
+ }
+ break;
+ }
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
+ default:
+ g_assert_not_reached ();
}
}
@@ -2423,10 +2506,10 @@ mono_arch_start_dyn_call (MonoDynCallInfo *info, gpointer **args, guint8 *ret, g
case ArgHFA:
if (ainfo->esize == 4) {
for (i = 0; i < ainfo->nregs; ++i)
- p->fpregs [ainfo->reg + i] = bitcast_r4_to_r8 (((float*)arg) [ainfo->foffsets [i] / 4]);
+ p->fpregs [ainfo->reg + i] = bitcast_r4_to_r8 (((float*)arg) [ainfo->offsets [i] / 4]);
} else {
for (i = 0; i < ainfo->nregs; ++i)
- p->fpregs [ainfo->reg + i] = ((double*)arg) [ainfo->foffsets [i] / 8];
+ p->fpregs [ainfo->reg + i] = ((double*)arg) [ainfo->offsets [i] / 8];
}
p->n_fpargs += ainfo->nregs;
break;
@@ -2521,10 +2604,10 @@ mono_arch_finish_dyn_call (MonoDynCallInfo *info, guint8 *buf)
/* Use the same area for returning fp values */
if (cinfo->ret.esize == 4) {
for (i = 0; i < cinfo->ret.nregs; ++i)
- ((float*)ret) [cinfo->ret.foffsets [i] / 4] = bitcast_r8_to_r4 (args->fpregs [i]);
+ ((float*)ret) [cinfo->ret.offsets [i] / 4] = bitcast_r8_to_r4 (args->fpregs [i]);
} else {
for (i = 0; i < cinfo->ret.nregs; ++i)
- ((double*)ret) [cinfo->ret.foffsets [i] / 8] = args->fpregs [i];
+ ((double*)ret) [cinfo->ret.offsets [i] / 8] = args->fpregs [i];
}
break;
default:
@@ -2782,6 +2865,13 @@ mono_arch_allocate_vars (MonoCompile *cfg)
mono_print_ins (cfg->vret_addr);
}
break;
+ case ArgSwiftVtypeLoweredRet: {
+ cfg->ret->opcode = OP_REGOFFSET;
+ cfg->ret->inst_basereg = cfg->frame_reg;
+ cfg->ret->inst_offset = offset;
+ offset += cinfo->ret.size;
+ break;
+ }
default:
g_assert_not_reached ();
break;
@@ -3002,6 +3092,9 @@ mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
linfo->ret.nslots = cinfo->ret.nregs;
linfo->ret.esize = cinfo->ret.esize;
break;
+ case ArgSwiftVtypeLoweredRet:
+ // LLVM compilation of P/Invoke wrappers is not supported
+ break;
default:
g_assert_not_reached ();
break;
@@ -3066,6 +3159,9 @@ mono_arch_get_llvm_call_info (MonoCompile *cfg, MonoMethodSignature *sig)
case ArgInSIMDReg:
lainfo->storage = LLVMArgVtypeInSIMDReg;
break;
+ case ArgSwiftError:
+ // LLVM compilation of P/Invoke wrappers is not supported
+ break;
default:
g_assert_not_reached ();
break;
@@ -3156,6 +3252,7 @@ mono_arch_emit_call (MonoCompile *cfg, MonoCallInst *call)
switch (cinfo->ret.storage) {
case ArgVtypeInIRegs:
case ArgHFA:
+ case ArgSwiftVtypeLoweredRet:
if (MONO_IS_TAILCALL_OPCODE (call))
break;
/*
@@ -3295,7 +3392,7 @@ mono_arch_emit_outarg_vt (MonoCompile *cfg, MonoInst *ins, MonoInst *src)
MONO_INST_NEW (cfg, load, OP_LOADR8_MEMBASE);
load->dreg = mono_alloc_freg (cfg);
load->inst_basereg = src->dreg;
- load->inst_offset = ainfo->foffsets [i];
+ load->inst_offset = ainfo->offsets [i];
MONO_ADD_INS (cfg->cbb, load);
add_outarg_reg (cfg, call, ainfo->esize == 4 ? ArgInFRegR4 : ArgInFReg, ainfo->reg + i, load);
}
@@ -3795,12 +3892,39 @@ emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins)
code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, GTMREG_TO_INT (loc->inst_offset));
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
- arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
+ arm_strfpw (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.offsets [i]);
else
- arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.foffsets [i]);
+ arm_strfpx (code, cinfo->ret.reg + i, ARMREG_LR, cinfo->ret.offsets [i]);
+ }
+ break;
+ }
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ case ArgSwiftVtypeLoweredRet: {
+ MonoInst *loc = cfg->arch.vret_addr_loc;
+ int i;
+ int gr = 0, fr = 0; // We can start from 0 since we are handling only returns
+
+ /* Load the destination address */
+ g_assert (loc && loc->opcode == OP_REGOFFSET);
+ code = emit_ldrx (code, ARMREG_LR, loc->inst_basereg, GTMREG_TO_INT (loc->inst_offset));
+ for (i = 0; i < cinfo->ret.nregs; ++i) {
+ switch (cinfo->ret.struct_storage [i]) {
+ case ArgInIReg:
+ code = emit_strx (code, gr++, ARMREG_LR, cinfo->ret.offsets [i]);
+ break;
+ case ArgInFRegR4:
+ code = emit_strfpw (code, fr++, ARMREG_LR, cinfo->ret.offsets [i]);
+ break;
+ case ArgInFReg:
+ code = emit_strfpx (code, fr++, ARMREG_LR, cinfo->ret.offsets [i]);
+ break;
+ default:
+ g_assert_not_reached ();
+ }
}
break;
}
+#endif /* MONO_ARCH_HAVE_SWIFTCALL */
case ArgVtypeByRef:
break;
default:
@@ -5947,9 +6071,9 @@ emit_move_args (MonoCompile *cfg, guint8 *code)
case ArgHFA:
for (part = 0; part < ainfo->nregs; part ++) {
if (ainfo->esize == 4)
- code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + ainfo->foffsets [part]));
+ code = emit_strfpw (code, ainfo->reg + part, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + ainfo->offsets [part]));
else
- code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + ainfo->foffsets [part]));
+ code = emit_strfpx (code, ainfo->reg + part, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + ainfo->offsets [part]));
}
break;
case ArgInSIMDReg:
@@ -6356,9 +6480,9 @@ mono_arch_emit_epilog (MonoCompile *cfg)
for (i = 0; i < cinfo->ret.nregs; ++i) {
if (cinfo->ret.esize == 4)
- code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + cinfo->ret.foffsets [i]));
+ code = emit_ldrfpw (code, cinfo->ret.reg + i, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + cinfo->ret.offsets [i]));
else
- code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + cinfo->ret.foffsets [i]));
+ code = emit_ldrfpx (code, cinfo->ret.reg + i, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset + cinfo->ret.offsets [i]));
}
break;
}
diff --git a/src/mono/mono/mini/mini-arm64.h b/src/mono/mono/mini/mini-arm64.h
index 99fbc7d2235e59..ae7f30a93223bf 100644
--- a/src/mono/mono/mini/mini-arm64.h
+++ b/src/mono/mono/mini/mini-arm64.h
@@ -242,6 +242,12 @@ typedef enum {
ArgVtypeOnStack,
ArgHFA,
ArgSwiftError,
+ /* Swift lowered Vtype returned in
+ * multiple int and float registers.
+ * ainfo->nregs is the number of used registers.
+	 * ainfo->offsets holds the offsets of the struct fields.
+ */
+ ArgSwiftVtypeLoweredRet,
ArgNone
} ArgStorage;
@@ -254,9 +260,9 @@ typedef struct {
int nregs, size;
/* ArgHFA */
int esize;
- /* ArgHFA */
- /* The offsets of the float values inside the arg */
- guint16 foffsets [4];
+ /* ArgHFA, ArgSwiftVtypeLoweredRet */
+ /* The offsets of the float and int values inside the arg */
+ guint16 offsets [4];
/* ArgOnStack */
int slot_size;
/* hfa */
@@ -264,6 +270,10 @@ typedef struct {
gboolean sign;
gboolean gsharedvt;
gboolean hfa;
+#ifdef MONO_ARCH_HAVE_SWIFTCALL
+ /* ArgSwiftVtypeLoweredRet */
+ ArgStorage struct_storage [4];
+#endif
} ArgInfo;
struct CallInfo {
diff --git a/src/mono/mono/mini/tramp-amd64.c b/src/mono/mono/mini/tramp-amd64.c
index 2ecb8b71fdb81b..9e61ba12c49d13 100644
--- a/src/mono/mono/mini/tramp-amd64.c
+++ b/src/mono/mono/mini/tramp-amd64.c
@@ -1179,7 +1179,7 @@ mono_arch_get_interp_to_native_trampoline (MonoTrampInfo **info)
/* save all return floating registers in the CallContext */
for (i = 0; i < FLOAT_RETURN_REGS; i++)
- amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + i * sizeof (double), i);
+ amd64_sse_movsd_membase_reg (code, AMD64_R11, MONO_STRUCT_OFFSET (CallContext, fregs) + float_return_regs [i] * sizeof (double), float_return_regs [i]);
#ifdef MONO_ARCH_HAVE_SWIFTCALL
/* set context registers to CallContext and load context registers from the stack */
diff --git a/src/tests/issues.targets b/src/tests/issues.targets
index 5da277d04e87b4..ef522631f6deb8 100644
--- a/src/tests/issues.targets
+++ b/src/tests/issues.targets
@@ -1911,9 +1911,6 @@
https://github.com/dotnet/runtime/issues/98628
-
- https://github.com/dotnet/runtime/issues/93631: Swift frozen struct support is not implemented on Mono yet
-
https://github.com/dotnet/runtime/issues/93631: Swift reverse pinvokes are not implemented on Mono yet