From a3bb3c06fdd5ff272a06d1e4b0cbb7142894812c Mon Sep 17 00:00:00 2001
From: Sergey Andreenko
Date: Mon, 8 Jun 2020 16:45:48 -0700
Subject: [PATCH] No retyping arm/arm64. (#36866)

* Enable for arm/arm64.

* Add more test cases specific to SIMD* handling.

* Support !compDoOldStructRetyping for arm/arm64.

* Review feedback.

* Return `JitDoOldStructRetyping` and disable failing tests with old retyping.

* A small workaround for the default arm64 behaviour.
---
 src/coreclr/src/jit/compiler.h              |  32 ++
 src/coreclr/src/jit/gentree.cpp             |   5 +
 src/coreclr/src/jit/gentree.h               |   8 +-
 src/coreclr/src/jit/importer.cpp            |  23 +-
 src/coreclr/src/jit/jitconfigvalues.h       |   5 -
 src/coreclr/src/jit/lclvars.cpp             |   2 +
 src/coreclr/src/jit/lower.cpp               |  81 +++-
 src/coreclr/src/jit/morph.cpp               |  17 +-
 .../JIT/Directed/StructABI/structreturn.cs  | 405 +++++++++++++++++-
 9 files changed, 531 insertions(+), 47 deletions(-)

diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h
index 30e39b5b16ebc..0aadb65be3402 100644
--- a/src/coreclr/src/jit/compiler.h
+++ b/src/coreclr/src/jit/compiler.h
@@ -9161,6 +9161,38 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
         return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
 #endif // TARGET_XXX

+#else // not FEATURE_MULTIREG_RET
+
+        // For this architecture there are no multireg returns
+        return false;
+
+#endif // FEATURE_MULTIREG_RET
+    }
+
+    // Returns true if the method returns a value in more than one return register;
+    // it should replace, or be merged with, compMethodReturnsMultiRegRetType when #36868 is fixed.
+    // The difference from the original `compMethodReturnsMultiRegRetType` is in ARM64 SIMD16 handling:
+    // this method correctly returns false for it (it is returned as an HVA), while the original returns true.
+    bool compMethodReturnsMultiRegRegTypeAlternate()
+    {
+#if FEATURE_MULTIREG_RET
+#if defined(TARGET_X86)
+        // On x86 only 64-bit longs are returned in multiple registers
+        return varTypeIsLong(info.compRetNativeType);
+#else // targets: X64-UNIX, ARM64 or ARM32
+#if defined(TARGET_ARM64)
+        // TYP_SIMD16 is returned in one register.
+        if (info.compRetNativeType == TYP_SIMD16)
+        {
+            return false;
+        }
+#endif
+        // On all other targets that support multireg return values:
+        // Methods returning a struct in multiple registers have a return value of TYP_STRUCT.
+        // Such method's compRetNativeType is TYP_STRUCT without a hidden RetBufArg
+        return varTypeIsStruct(info.compRetNativeType) && (info.compRetBuffArg == BAD_VAR_NUM);
+#endif // TARGET_XXX
+
 #else // not FEATURE_MULTIREG_RET

     // For this architecture there are no multireg returns
diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp
index 6a150ea2ebaf9..2b7567f6d53ba 100644
--- a/src/coreclr/src/jit/gentree.cpp
+++ b/src/coreclr/src/jit/gentree.cpp
@@ -15149,6 +15149,11 @@ GenTree* Compiler::gtNewTempAssign(
             assert(tmp == genReturnLocal);
             ok = true;
         }
+        else if (varTypeIsSIMD(dstTyp) && (valTyp == TYP_STRUCT))
+        {
+            assert(val->IsCall());
+            ok = true;
+        }

         if (!ok)
         {
diff --git a/src/coreclr/src/jit/gentree.h b/src/coreclr/src/jit/gentree.h
index fd196228074de..04617f5641bab 100644
--- a/src/coreclr/src/jit/gentree.h
+++ b/src/coreclr/src/jit/gentree.h
@@ -4257,13 +4257,7 @@ struct GenTreeCall final : public GenTree
         {
             return true;
         }
-#elif defined(FEATURE_HFA) && defined(TARGET_ARM64)
-        // SIMD types are returned in vector regs on ARM64.
-        if (varTypeIsSIMD(gtType))
-        {
-            return false;
-        }
-#endif // FEATURE_HFA && TARGET_ARM64
+#endif

         if (!varTypeIsStruct(gtType) || HasRetBufArg())
         {
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index 5ffa1b77ae13e..3433577b66ee3 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -804,6 +804,7 @@ void Compiler::impAssignTempGen(unsigned tmpNum,
         assert(tiVerificationNeeded || varType == TYP_UNDEF || varTypeIsStruct(varType));
         lvaSetStruct(tmpNum, structType, false);

+        varType = lvaTable[tmpNum].lvType;
         // Now, set the type of the struct value. Note that lvaSetStruct may modify the type
         // of the lclVar to a specialized type (e.g. TYP_SIMD), based on the handle (structType)
         // that has been passed in for the value being assigned to the temp, in which case we
@@ -812,9 +813,12 @@ void Compiler::impAssignTempGen(unsigned tmpNum,
         // type, this would not be necessary - but that requires additional JIT/EE interface
         // calls that may not actually be required - e.g. if we only access a field of a struct.

-        val->gtType = lvaTable[tmpNum].lvType;
+        if (compDoOldStructRetyping())
+        {
+            val->gtType = varType;
+        }

-        GenTree* dst = gtNewLclvNode(tmpNum, val->gtType);
+        GenTree* dst = gtNewLclvNode(tmpNum, varType);
         asg = impAssignStruct(dst, val, structType, curLevel, pAfterStmt, ilOffset, block);
     }
     else
@@ -1224,7 +1228,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
                 lcl->gtFlags |= GTF_DONT_CSE;
                 varDsc->lvIsMultiRegRet = true;
             }
-            else if (lcl->gtType != src->gtType)
+            else if ((lcl->gtType != src->gtType) && compDoOldStructRetyping())
             {
                 // We change this to a GT_LCL_FLD (from a GT_ADDR of a GT_LCL_VAR)
                 lcl->ChangeOper(GT_LCL_FLD);
@@ -1434,7 +1438,7 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
             dest = gtNewOperNode(GT_IND, asgType, destAddr);
         }
     }
-    else
+    else if (compDoOldStructRetyping())
     {
         dest->gtType = asgType;
     }
@@ -8011,7 +8015,7 @@ var_types Compiler::impImportCall(OPCODE opcode,
     CORINFO_CLASS_HANDLE actualMethodRetTypeSigClass;
     actualMethodRetTypeSigClass = sig->retTypeSigClass;

-    if (varTypeIsStruct(callRetTyp))
+    if (varTypeIsStruct(callRetTyp) && compDoOldStructRetyping())
     {
         callRetTyp = impNormStructType(actualMethodRetTypeSigClass);
         call->gtType = callRetTyp;
@@ -16656,7 +16660,14 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
                     impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
                                      (unsigned)CHECK_SPILL_ALL);

-                    GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
+                    var_types lclRetType = op2->TypeGet();
+                    if (!compDoOldStructRetyping())
+                    {
+                        LclVarDsc* varDsc = lvaGetDesc(lvaInlineeReturnSpillTemp);
+                        lclRetType        = varDsc->lvType;
+                    }
+
+                    GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, lclRetType);

                     if (compDoOldStructRetyping())
                     {
diff --git a/src/coreclr/src/jit/jitconfigvalues.h b/src/coreclr/src/jit/jitconfigvalues.h
index d531362543ebe..b1ecf953479bd 100644
--- a/src/coreclr/src/jit/jitconfigvalues.h
+++ b/src/coreclr/src/jit/jitconfigvalues.h
@@ -437,13 +437,8 @@ CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSave
 #endif // defined(TARGET_ARM64)
 #endif // DEBUG

-#if defined(TARGET_ARMARCH)
 CONFIG_INTEGER(JitDoOldStructRetyping, W("JitDoOldStructRetyping"), 1) // Allow Jit to retype structs as primitive types
                                                                        // when possible.
-#else
-CONFIG_INTEGER(JitDoOldStructRetyping, W("JitDoOldStructRetyping"), 1) // Allow Jit to retype structs as primitive types
-                                                                       // when possible.
-#endif

 #undef CONFIG_INTEGER
 #undef CONFIG_STRING
diff --git a/src/coreclr/src/jit/lclvars.cpp b/src/coreclr/src/jit/lclvars.cpp
index 27732186d13e8..1c9ea8a1053d5 100644
--- a/src/coreclr/src/jit/lclvars.cpp
+++ b/src/coreclr/src/jit/lclvars.cpp
@@ -1630,6 +1630,7 @@ void Compiler::StructPromotionHelper::CheckRetypedAsScalar(CORINFO_FIELD_HANDLE
 //
 bool Compiler::StructPromotionHelper::CanPromoteStructType(CORINFO_CLASS_HANDLE typeHnd)
 {
+    assert(typeHnd != nullptr);
     if (!compiler->eeIsValueClass(typeHnd))
     {
         // TODO-ObjectStackAllocation: Enable promotion of fields of stack-allocated objects.
@@ -1865,6 +1866,7 @@ bool Compiler::StructPromotionHelper::CanPromoteStructVar(unsigned lclNum)
     }

     CORINFO_CLASS_HANDLE typeHnd = varDsc->lvVerTypeInfo.GetClassHandle();
+    assert(typeHnd != nullptr);
     return CanPromoteStructType(typeHnd);
 }
diff --git a/src/coreclr/src/jit/lower.cpp b/src/coreclr/src/jit/lower.cpp
index 07d15696de9b1..902b8bb28b376 100644
--- a/src/coreclr/src/jit/lower.cpp
+++ b/src/coreclr/src/jit/lower.cpp
@@ -2975,9 +2975,8 @@ void Lowering::LowerRet(GenTreeUnOp* ret)
         }
     }
 #endif // DEBUG
-    if (varTypeIsStruct(ret) && !comp->compMethodReturnsMultiRegRetType())
+    if (varTypeIsStruct(ret))
     {
-        assert(!comp->compDoOldStructRetyping());
         LowerRetStruct(ret);
     }
 }
@@ -3103,7 +3102,37 @@ void Lowering::LowerStoreLocCommon(GenTreeLclVarCommon* lclStore)
 //
 void Lowering::LowerRetStruct(GenTreeUnOp* ret)
 {
-    assert(!comp->compMethodReturnsMultiRegRetType());
+#if defined(FEATURE_HFA) && defined(TARGET_ARM64)
+    if (ret->TypeIs(TYP_SIMD16))
+    {
+        if (comp->info.compRetNativeType == TYP_STRUCT)
+        {
+            assert(ret->gtGetOp1()->TypeIs(TYP_SIMD16));
+            assert(comp->compMethodReturnsMultiRegRegTypeAlternate());
+            if (!comp->compDoOldStructRetyping())
+            {
+                ret->ChangeType(comp->info.compRetNativeType);
+            }
+            else
+            {
+                // With old struct retyping, a value that is returned as an HFA
+                // could have either SIMD16 or STRUCT type; keep it as is.
+                return;
+            }
+        }
+        else
+        {
+            assert(comp->info.compRetNativeType == TYP_SIMD16);
+            return;
+        }
+    }
+#endif
+
+    if (comp->compMethodReturnsMultiRegRegTypeAlternate())
+    {
+        return;
+    }
+
     assert(!comp->compDoOldStructRetyping());
     assert(ret->OperIs(GT_RETURN));
     assert(varTypeIsStruct(ret));
@@ -3185,12 +3214,13 @@ void Lowering::LowerRetStruct(GenTreeUnOp* ret)
 // node - The return node to lower.
 //
 // Notes:
+//    - this function is only for LclVars that are returned in one register;
 //    - if LclVar is allocated in memory then read it as return type;
 //    - if LclVar can be enregistered read it as register type and add a bitcast if necessary;
 //
 void Lowering::LowerRetStructLclVar(GenTreeUnOp* ret)
 {
-    assert(!comp->compMethodReturnsMultiRegRetType());
+    assert(!comp->compMethodReturnsMultiRegRegTypeAlternate());
     assert(!comp->compDoOldStructRetyping());
     assert(ret->OperIs(GT_RETURN));
     GenTreeLclVarCommon* lclVar = ret->gtGetOp1()->AsLclVar();
@@ -3271,18 +3301,31 @@ void Lowering::LowerCallStruct(GenTreeCall* call)
         return;
     }

-#ifdef TARGET_ARMARCH
-    // !compDoOldStructRetyping is not supported on arm yet,
-    // because of HFA.
-    assert(comp->compDoOldStructRetyping());
-    return;
-#else // !TARGET_ARMARCH
+#if defined(FEATURE_HFA)
+    if (comp->IsHfa(call))
+    {
+#if defined(TARGET_ARM64)
+        assert(comp->GetHfaCount(call) == 1);
+#elif defined(TARGET_ARM)
+        // ARM returns a double in two float registers, but
+        // `call->HasMultiRegRetVal()` counts double registers.
+        assert(comp->GetHfaCount(call) <= 2);
+#else  // !TARGET_ARM64 && !TARGET_ARM
+        unreached();
+#endif // !TARGET_ARM64 && !TARGET_ARM
+        var_types hfaType = comp->GetHfaType(call);
+        if (call->TypeIs(hfaType))
+        {
+            return;
+        }
+    }
+#endif // FEATURE_HFA

     assert(!comp->compDoOldStructRetyping());
     CORINFO_CLASS_HANDLE        retClsHnd = call->gtRetClsHnd;
     Compiler::structPassingKind howToReturnStruct;
     var_types                   returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
-    assert(!varTypeIsStruct(returnType) && returnType != TYP_UNKNOWN);
+    assert(returnType != TYP_STRUCT && returnType != TYP_UNKNOWN);

     var_types origType = call->TypeGet();
     call->gtType       = genActualType(returnType);
@@ -3297,12 +3340,18 @@ void Lowering::LowerCallStruct(GenTreeCall* call)
             case GT_STORE_BLK:
             case GT_STORE_OBJ:
                 // Leave as is, the user will handle it.
-                assert(user->TypeIs(origType));
+                assert(user->TypeIs(origType) || varTypeIsSIMD(user->TypeGet()));
+                break;
+
+#ifdef FEATURE_SIMD
+            case GT_STORE_LCL_FLD:
+                assert(varTypeIsSIMD(user) && (returnType == user->TypeGet()));
                 break;
+#endif // FEATURE_SIMD

             case GT_STOREIND:
 #ifdef FEATURE_SIMD
-                if (user->TypeIs(TYP_SIMD8))
+                if (varTypeIsSIMD(user))
                 {
                     user->ChangeType(returnType);
                     break;
@@ -3319,7 +3368,6 @@ void Lowering::LowerCallStruct(GenTreeCall* call)
                 unreached();
         }
     }
-#endif // !TARGET_ARMARCH
 }
@@ -3338,9 +3386,8 @@ void Lowering::LowerStoreCallStruct(GenTreeBlk* store)
     assert(store->Data()->IsCall());
     GenTreeCall* call = store->Data()->AsCall();

-    const ClassLayout* layout = store->GetLayout();
-    assert(layout->GetSlotCount() == 1);
-    const var_types regType = layout->GetRegisterType();
+    const ClassLayout* layout  = store->GetLayout();
+    const var_types    regType = layout->GetRegisterType();
     unsigned storeSize = store->GetLayout()->GetSize();

     if (regType != TYP_UNDEF)
diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp
index 4fcaa51aa6173..68ac3f79bc18a 100644
--- a/src/coreclr/src/jit/morph.cpp
+++ b/src/coreclr/src/jit/morph.cpp
@@ -10191,8 +10191,19 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne
         }
         else if (effectiveVal->TypeGet() != asgType)
         {
-            GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
-            effectiveVal  = gtNewIndir(asgType, addr);
+            if (effectiveVal->IsCall())
+            {
+#ifdef DEBUG
+                GenTreeCall* call = effectiveVal->AsCall();
+                assert(call->TypeGet() == TYP_STRUCT);
+                assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
+#endif
+            }
+            else
+            {
+                GenTree* addr = gtNewOperNode(GT_ADDR, TYP_BYREF, effectiveVal);
+                effectiveVal  = gtNewIndir(asgType, addr);
+            }
         }
     }
     else
@@ -11941,7 +11952,7 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
             return tree;
         }

-        if (tree->TypeIs(TYP_STRUCT) && op1->OperIs(GT_OBJ, GT_BLK))
+        if (varTypeIsStruct(tree) && op1->OperIs(GT_OBJ, GT_BLK))
         {
             assert(!compDoOldStructRetyping());
             GenTree* addr = op1->AsBlk()->Addr();
diff --git a/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.cs b/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.cs
index 226818941c806..da4738a021a23 100644
--- a/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.cs
+++ b/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.cs
@@ -9,6 +9,7 @@
 using System.Diagnostics;
 using System.Runtime.CompilerServices;
 using System.Runtime.InteropServices;
+using System.Runtime.Intrinsics;

 #region Test struct return optimizations
 class TestStructReturns
@@ -927,7 +928,7 @@ public static void Test()
 }
 #endregion

-class TestHFA
+class TestHFAandHVA
 {
     [MethodImpl(MethodImplOptions.NoInlining)]
     static float ReturnFloat()
@@ -965,6 +966,17 @@ static Vector4 ReturnVector4UsingCall()
         return ReturnVector4();
     }

+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static void TestReturnPrimitives()
+    {
+        ReturnFloat();
+        ReturnDouble();
+        ReturnVector2();
+        ReturnVector3();
+        ReturnVector4();
+        ReturnVector4UsingCall();
+    }
+
     struct FloatWrapper
     {
         public float f;
@@ -1110,14 +1122,8 @@ static Vector2x2Wrapper ReturnVector2x2Wrapper()
     }

     [MethodImpl(MethodImplOptions.NoInlining)]
-    public static void Test()
+    static void TestReturnPrimitivesInWrappers()
     {
-        ReturnFloat();
-        ReturnDouble();
-        ReturnVector2();
-        ReturnVector3();
-        ReturnVector4();
-        ReturnVector4UsingCall();
         ReturnFloatWrapper();
         ReturnDoubleWrapper();
         ReturnFloats2Wrapper();
@@ -1131,6 +1137,387 @@ public static void Test()
         ReturnVector4Wrapper();
         ReturnVector2x2Wrapper();
     }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<int> ReturnVectorInt()
+    {
+        return new Vector<int>();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<int> ReturnVectorIntUsingCall()
+    {
+        var v = ReturnVectorInt();
+        return v;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<float> ReturnVectorFloat()
+    {
+        return new Vector<float>();
+    }
+
+    struct A
+    {
+        bool a;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<A> ReturnVectorA()
+    {
+        return new Vector<A>();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<float> ReturnVectorFloat2()
+    {
+        return (Vector<float>)ReturnVectorA();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<T> ReturnVectorT<T>() where T : struct
+    {
+        return new Vector<T>();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<T> ReturnVectorTWithMerge<T>(int v, T init1, T init2, T init3, T init4) where T : struct
+    {
+        // issue https://github.com/dotnet/runtime/issues/37341
+        // if (v == 0)
+        // {
+        //     return new Vector<T>();
+        // }
+        // else if (v == 1)
+        // {
+        //     return new Vector<T>(init1);
+        // }
+        // else if (v == 2)
+        // {
+        //     return new Vector<T>(init2);
+        // }
+        // else if (v == 3)
+        // {
+        //     return new Vector<T>(init3);
+        // }
+        // else
+        // {
+        //     return new Vector<T>(init4);
+        // }
+        return new Vector<T>();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<T> ReturnVectorT2<T>(T init) where T : struct
+    {
+        var a = new Vector<T>();
+        var b = new Vector<T>(init);
+        var c = new Vector<T>(init);
+        var d = a + b + c;
+        return d;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector<int> ReturnVectorInt2<T>(Vector<T> left, Vector<T> right) where T : struct
+    {
+        Vector<int> cond = (Vector<int>)Vector.LessThan(left, right);
+        return cond;
+    }
+
+    struct VectorShortWrapper
+    {
+        Vector<short> f;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static VectorShortWrapper ReturnVectorShortWrapper()
+    {
+        return new VectorShortWrapper();
+    }
+
+    struct VectorLongWrapper
+    {
+        Vector<long> f;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static VectorLongWrapper ReturnVectorLongWrapper()
+    {
+        return new VectorLongWrapper();
+    }
+
+    struct VectorDoubleWrapper
+    {
+        Vector<double> f;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static VectorDoubleWrapper ReturnVectorDoubleWrapper()
+    {
+        return new VectorDoubleWrapper();
+    }
+
+    struct VectorTWrapper<T> where T : struct
+    {
+        Vector<T> f;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static VectorTWrapper<T> ReturnVectorTWrapper<T>() where T : struct
+    {
+        return new VectorTWrapper<T>();
+    }
+
+    struct VectorTWrapperWrapper<T> where T : struct
+    {
+        VectorTWrapper<T> f;
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static VectorTWrapperWrapper<T> ReturnVectorTWrapperWrapper<T>() where T : struct
+    {
+        return new VectorTWrapperWrapper<T>();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static void TestReturnViaThrowing<T>() where T : struct
+    {
+        Vector<T> vector = Vector<T>.One;
+        try
+        {
+            T value = vector[Vector<T>.Count];
+            System.Diagnostics.Debug.Assert(false);
+        }
+        catch (IndexOutOfRangeException)
+        {
+            return;
+        }
+        System.Diagnostics.Debug.Assert(false);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    private static void TestThrowing()
+    {
+        TestReturnViaThrowing<byte>();
+        TestReturnViaThrowing<sbyte>();
+        TestReturnViaThrowing<short>();
+        TestReturnViaThrowing<ushort>();
+        TestReturnViaThrowing<int>();
+        TestReturnViaThrowing<uint>();
+        TestReturnViaThrowing<long>();
+        TestReturnViaThrowing<ulong>();
+        TestReturnViaThrowing<float>();
+        TestReturnViaThrowing<double>();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static void TestReturnVectorT()
+    {
+        ReturnVectorInt();
+        ReturnVectorIntUsingCall();
+        ReturnVectorFloat();
+        ReturnVectorT<int>();
+        ReturnVectorT<uint>();
+        ReturnVectorT<short>();
+        ReturnVectorT<long>();
+        ReturnVectorT<float>();
+        ReturnVectorT<double>();
+        ReturnVectorT<A>();
+        ReturnVectorT<Vector<int>>();
+        ReturnVectorT2<int>(1);
+        try
+        {
+            var a = ReturnVectorT2(new Vector4(1));
+            // Delete WriteLine when https://github.com/dotnet/runtime/issues/37506 is fixed.
+            Console.WriteLine(a.ToString());
+            Debug.Assert(false, "unreachable");
+        }
+        catch (System.NotSupportedException)
+        {
+        }
+        try
+        {
+            var a = ReturnVectorT2<VectorTWrapperWrapper<int>>(new VectorTWrapperWrapper<int>());
+            // Delete WriteLine when https://github.com/dotnet/runtime/issues/37506 is fixed.
+            Console.WriteLine(a.ToString());
+            Debug.Assert(false, "unreachable");
+        }
+        catch (System.NotSupportedException)
+        {
+        }
+        ReturnVectorInt2(new Vector<int>(1), new Vector<int>(2));
+        ReturnVectorInt2(new Vector<float>(1), new Vector<float>(2));
+
+        ReturnVectorTWithMerge(0, 0, 0, 0, 0);
+        ReturnVectorTWithMerge(1, 0.0, 0.0, 0.0, 0.0);
+        ReturnVectorTWithMerge(2, 0, 0, 0, 0);
+        ReturnVectorTWithMerge(3, 0, 0, 0, 0);
+        ReturnVectorTWithMerge<Vector<int>>(3, new Vector<int>(0), new Vector<int>(0), new Vector<int>(0), new Vector<int>(0));
+
+        ReturnVectorShortWrapper();
+        ReturnVectorLongWrapper();
+        ReturnVectorDoubleWrapper();
+        ReturnVectorTWrapper<int>();
+        ReturnVectorTWrapper<float>();
+        ReturnVectorTWrapperWrapper<int>();
+        ReturnVectorTWrapperWrapper<short>();
+        ReturnVectorTWrapperWrapper<float>();
+        ReturnVectorTWrapperWrapper<double>();
+
+        TestThrowing();
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector64<int> ReturnVector64Int()
+    {
+        return System.Runtime.Intrinsics.Vector64.Create(1);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector64<double> ReturnVector64Double()
+    {
+        return System.Runtime.Intrinsics.Vector64.Create(1.0);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector64<int> ReturnVector64IntWithMerge(int v)
+    {
+        switch (v)
+        {
+            case 0:
+                return System.Runtime.Intrinsics.Vector64.Create(0);
+            case 1:
+                return System.Runtime.Intrinsics.Vector64.Create(1);
+            case 2:
+                return System.Runtime.Intrinsics.Vector64.Create(2);
+            case 3:
+                return System.Runtime.Intrinsics.Vector64.Create(3);
+            case 4:
+                return System.Runtime.Intrinsics.Vector64.Create(4);
+            case 5:
+                return System.Runtime.Intrinsics.Vector64.Create(5);
+        }
+        return System.Runtime.Intrinsics.Vector64.Create(6);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static void TestReturnVector64(int v)
+    {
+        var a = ReturnVector64Int();
+        var b = ReturnVector64Double();
+        var c = ReturnVector64IntWithMerge(8);
+        if (v == 0)
+        {
+            Console.WriteLine(a);
+            Console.WriteLine(b);
+            Console.WriteLine(c);
+        }
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector128<int> ReturnVector128Int()
+    {
+        return System.Runtime.Intrinsics.Vector128.Create(1);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector128<double> ReturnVector128Double()
+    {
+        return System.Runtime.Intrinsics.Vector128.Create(1.0);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector128<int> ReturnVector128IntWithMerge(int v)
+    {
+        switch (v)
+        {
+            case 0:
+                return System.Runtime.Intrinsics.Vector128.Create(0);
+            case 1:
+                return System.Runtime.Intrinsics.Vector128.Create(1);
+            case 2:
+                return System.Runtime.Intrinsics.Vector128.Create(2);
+            case 3:
+                return System.Runtime.Intrinsics.Vector128.Create(3);
+            case 4:
+                return System.Runtime.Intrinsics.Vector128.Create(4);
+            case 5:
+                return System.Runtime.Intrinsics.Vector128.Create(5);
+        }
+        return System.Runtime.Intrinsics.Vector128.Create(6);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static void TestReturnVector128(int v)
+    {
+        var a = ReturnVector128Int();
+        var b = ReturnVector128Double();
+        var c = ReturnVector128IntWithMerge(8);
+        if (v == 0)
+        {
+            Console.WriteLine(a);
+            Console.WriteLine(b);
+            Console.WriteLine(c);
+        }
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector256<int> ReturnVector256Int()
+    {
+        return System.Runtime.Intrinsics.Vector256.Create(1);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector256<double> ReturnVector256Double()
+    {
+        return System.Runtime.Intrinsics.Vector256.Create(1.0);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static Vector256<int> ReturnVector256IntWithMerge(int v)
+    {
+        switch (v)
+        {
+            case 0:
+                return System.Runtime.Intrinsics.Vector256.Create(0);
+            case 1:
+                return System.Runtime.Intrinsics.Vector256.Create(1);
+            case 2:
+                return System.Runtime.Intrinsics.Vector256.Create(2);
+            case 3:
+                return System.Runtime.Intrinsics.Vector256.Create(3);
+            case 4:
+                return System.Runtime.Intrinsics.Vector256.Create(4);
+            case 5:
+                return System.Runtime.Intrinsics.Vector256.Create(5);
+        }
+        return System.Runtime.Intrinsics.Vector256.Create(6);
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    static void TestReturnVector256(int v)
+    {
+        var a = ReturnVector256Int();
+        var b = ReturnVector256Double();
+        var c = ReturnVector256IntWithMerge(8);
+        if (v == 0)
+        {
+            Console.WriteLine(a);
+            Console.WriteLine(b);
+            Console.WriteLine(c);
+        }
+    }
+
+    [MethodImpl(MethodImplOptions.NoInlining)]
+    public static void Test()
+    {
+        TestReturnPrimitives();
+        TestReturnPrimitivesInWrappers();
+        TestReturnVectorT();
+        TestReturnVector64(1);
+        TestReturnVector128(1);
+        TestReturnVector256(1);
+    }
 }

 class TestNon2PowerStructs
@@ -1334,7 +1721,7 @@ public static int Main()
         TestStructReturns.Test();
         TestUnsafeCasts.Test();
         TestMergeReturnBlocks.Test();
-        TestHFA.Test();
+        TestHFAandHVA.Test();
         TestNon2PowerStructs.Test();
         return 100;
     }
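---

A note for context, not part of the patch: the two ARM64 return shapes this change
separates can be sketched in a few lines of C#. This is a minimal illustration; the
type and method names below are invented, and the register/type mapping in the
comments is the behaviour the patch relies on.

    // Minimal sketch, assuming ARM64 and the JIT types named in this patch.
    using System.Runtime.CompilerServices;
    using System.Runtime.Intrinsics;

    // An HFA of four floats: per the ARM64 ABI it is returned in four float
    // registers (s0-s3), so the JIT sees a multi-reg TYP_STRUCT return.
    struct FourFloats { public float a, b, c, d; }

    static class ReturnShapes
    {
        [MethodImpl(MethodImplOptions.NoInlining)]
        static FourFloats ReturnHfa() => default;

        // An HVA with a single 16-byte element: returned in one q register and
        // modeled as TYP_SIMD16, the case that compMethodReturnsMultiRegRegTypeAlternate
        // now reports as a single-register return.
        [MethodImpl(MethodImplOptions.NoInlining)]
        static Vector128<float> ReturnHva() => Vector128.Create(1.0f);

        static float Use()
        {
            FourFloats f = ReturnHfa();       // multi-reg struct return
            Vector128<float> v = ReturnHva(); // single vector-register return
            return f.a + v.GetElement(0);
        }
    }

Assuming the standard COMPlus_ environment-variable mapping for JIT config values,
COMPlus_JitDoOldStructRetyping=1 keeps the old retyping path and 0 exercises the
new struct-typed lowering added here.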