diff --git a/src/mono/mono/mini/mini-amd64.c b/src/mono/mono/mini/mini-amd64.c
index fbda85ae883a79..1bc469b1aac7d5 100644
--- a/src/mono/mono/mini/mini-amd64.c
+++ b/src/mono/mono/mini/mini-amd64.c
@@ -1990,13 +1990,18 @@ mono_arch_allocate_vars (MonoCompile *cfg)
}
case ArgSwiftError: {
ins->opcode = OP_REGOFFSET;
- ins->inst_basereg = cfg->frame_reg;
- ins->inst_offset = ainfo->offset + ARGS_OFFSET;
offset = ALIGN_TO (offset, sizeof (target_mgreg_t));
+ ins->inst_basereg = cfg->frame_reg;
+ ins->inst_offset = offset;
offset += sizeof (target_mgreg_t);
cfg->arch.swift_error_var = ins;
- cfg->used_int_regs |= (size_t)(1 << AMD64_R12);
+
+ /* In the n2m case, the error register functions as an extra return register
+	 * and thus is not treated as callee-saved.
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
+ cfg->used_int_regs |= (size_t)(1 << AMD64_R12);
}
break;
default:
@@ -4280,8 +4285,13 @@ emit_move_return_value (MonoCompile *cfg, MonoInst *ins, guint8 *code)
guint32 quad;
if (cfg->arch.swift_error_var) {
- amd64_mov_reg_membase (code, AMD64_R11, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset, sizeof (target_mgreg_t));
- amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_R12, sizeof (target_mgreg_t));
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ amd64_mov_reg_membase (code, AMD64_R11, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset, sizeof (target_mgreg_t));
+ amd64_mov_membase_reg (code, AMD64_R11, 0, AMD64_R12, sizeof (target_mgreg_t));
+ }
+ else if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
+ amd64_mov_reg_membase (code, AMD64_R12, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset, sizeof (target_mgreg_t));
+ }
}
/* Move return value to the target register */
@@ -8248,11 +8258,17 @@ MONO_RESTORE_WARNING
amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, ainfo->reg, 8);
break;
case ArgSwiftError:
- if (ainfo->offset) {
- amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
- amd64_mov_membase_reg (code, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset, AMD64_R11, sizeof (target_mgreg_t));
- } else {
- amd64_mov_membase_reg (code, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset, ainfo->reg, sizeof (target_mgreg_t));
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ if (ainfo->offset) {
+ amd64_mov_reg_membase (code, AMD64_R11, AMD64_RBP, ARGS_OFFSET + ainfo->offset, 8);
+ amd64_mov_membase_reg (code, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset, AMD64_R11, sizeof (target_mgreg_t));
+ } else {
+ amd64_mov_membase_reg (code, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset, ainfo->reg, sizeof (target_mgreg_t));
+ }
+ } else if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
+ /* Relies on arguments being passed on the stack */
+ amd64_lea_membase (code, AMD64_R11, cfg->arch.swift_error_var->inst_basereg, cfg->arch.swift_error_var->inst_offset);
+ amd64_mov_membase_reg (code, ins->inst_basereg, ins->inst_offset, AMD64_R11, 8);
}
break;
default:
diff --git a/src/mono/mono/mini/mini-arm64.c b/src/mono/mono/mini/mini-arm64.c
index 7e95fc7a818cee..56ecdf6bca2aee 100644
--- a/src/mono/mono/mini/mini-arm64.c
+++ b/src/mono/mono/mini/mini-arm64.c
@@ -2887,7 +2887,12 @@ mono_arch_allocate_vars (MonoCompile *cfg)
offset += size;
cfg->arch.swift_error_var = ins;
- cfg->used_int_regs |= 1 << ARMREG_R21;
+
+ /* In the n2m case, the error register functions as an extra return register
+	 * and thus is not treated as callee-saved.
+ */
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE)
+ cfg->used_int_regs |= 1 << ARMREG_R21;
break;
}
default:
@@ -3731,8 +3736,13 @@ emit_move_return_value (MonoCompile *cfg, guint8 * code, MonoInst *ins)
MonoCallInst *call;
if (cfg->arch.swift_error_var) {
- code = emit_ldrx (code, ARMREG_IP0, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
- code = emit_strx (code, ARMREG_R21, ARMREG_IP0, 0);
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ code = emit_ldrx (code, ARMREG_IP0, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
+ code = emit_strx (code, ARMREG_R21, ARMREG_IP0, 0);
+ } else if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
+ /* Load the value of SwiftError into R21 */
+ code = emit_ldrx (code, ARMREG_R21, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
+ }
}
call = (MonoCallInst*)ins;
@@ -5937,11 +5947,17 @@ emit_move_args (MonoCompile *cfg, guint8 *code)
code = emit_strfpq (code, ainfo->reg, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset));
break;
case ArgSwiftError:
- if (ainfo->offset) {
- code = emit_ldrx (code, ARMREG_IP0, cfg->arch.args_reg, ainfo->offset);
- code = emit_strx (code, ARMREG_IP0, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
- } else {
- code = emit_strx (code, ainfo->reg, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
+ if (cfg->method->wrapper_type == MONO_WRAPPER_MANAGED_TO_NATIVE) {
+ if (ainfo->offset) {
+ code = emit_ldrx (code, ARMREG_IP0, cfg->arch.args_reg, ainfo->offset);
+ code = emit_strx (code, ARMREG_IP0, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
+ } else {
+ code = emit_strx (code, ainfo->reg, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
+ }
+ } else if (cfg->method->wrapper_type == MONO_WRAPPER_NATIVE_TO_MANAGED) {
+ arm_addx_imm (code, ARMREG_IP0, cfg->arch.swift_error_var->inst_basereg, GTMREG_TO_INT (cfg->arch.swift_error_var->inst_offset));
+ /* Relies on arguments being passed on the stack */
+ code = emit_strx (code, ARMREG_IP0, ins->inst_basereg, GTMREG_TO_INT (ins->inst_offset));
}
break;
default:
diff --git a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs
index b1575e04deabd1..0f8b3c4294fb0c 100644
--- a/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs
+++ b/src/tests/Interop/Swift/SwiftErrorHandling/SwiftErrorHandling.cs
@@ -128,7 +128,6 @@ public unsafe static void TestSwiftErrorOnStackNotThrown()
}
[Fact]
- [SkipOnMono("needs reverse P/Invoke support")]
public static unsafe void TestUnmanagedCallersOnly()
{
SwiftError error;
@@ -144,7 +143,6 @@ public static unsafe void TestUnmanagedCallersOnly()
}
[Fact]
- [SkipOnMono("needs reverse P/Invoke support")]
public static unsafe void TestUnmanagedCallersOnlyWithReturn()
{
SwiftError error;
diff --git a/src/tests/issues.targets b/src/tests/issues.targets
index 8289fcefb924fb..ad532de65f6281 100644
--- a/src/tests/issues.targets
+++ b/src/tests/issues.targets
@@ -2191,6 +2191,9 @@
https://github.com/dotnet/runtime/issues/71656
+
+ Reverse P/Invokes not supported yet
+