diff --git a/src/coreclr/src/jit/assertionprop.cpp b/src/coreclr/src/jit/assertionprop.cpp
index 69b8f73b07f87..031d4867d87cf 100644
--- a/src/coreclr/src/jit/assertionprop.cpp
+++ b/src/coreclr/src/jit/assertionprop.cpp
@@ -2679,6 +2679,11 @@ GenTree* Compiler::optConstantAssertionProp(AssertionDsc* curAssertion,
newTree->ChangeOperConst(GT_CNS_INT);
newTree->AsIntCon()->gtIconVal = curAssertion->op2.u1.iconVal;
newTree->ClearIconHandleMask();
+ if (newTree->TypeIs(TYP_STRUCT))
+ {
+ // A TYP_STRUCT LCL_VAR can be initialized with a GT_CNS_INT; keep the constant typed as INT, not STRUCT.
+ newTree->ChangeType(TYP_INT);
+ }
}
// If we're doing an array index address, assume any constant propagated contributes to the index.
if (isArrIndex)
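The assertionprop.cpp change above handles the case where constant propagation substitutes a GT_CNS_INT(0) for a zero-initialized TYP_STRUCT local; the propagated constant must not keep the STRUCT type. A minimal C# sketch of the source pattern, with hypothetical names (it mirrors TestConstPropogation in the structreturn.cs test added below):

    struct ReturnStruct { public float a; }

    static class AssertionPropExample
    {
        static ReturnStruct ReturnZeroInit(int x)
        {
            if (x == 0)
            {
                ReturnStruct s = new ReturnStruct(); // ASG(struct s, 0) in the JIT IR
                return s;                            // the 0 can be const-propagated into the RETURN
            }
            return new ReturnStruct { a = x };
        }
    }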
diff --git a/src/coreclr/src/jit/compiler.h b/src/coreclr/src/jit/compiler.h
index 61939a9bca3fe..8f9b8e7535a89 100644
--- a/src/coreclr/src/jit/compiler.h
+++ b/src/coreclr/src/jit/compiler.h
@@ -2684,7 +2684,7 @@ class Compiler
fgArgTabEntry* gtArgEntryByLateArgIndex(GenTreeCall* call, unsigned lateArgInx);
static GenTree* gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx);
- GenTree* gtNewAssignNode(GenTree* dst, GenTree* src);
+ GenTreeOp* gtNewAssignNode(GenTree* dst, GenTree* src);
GenTree* gtNewTempAssign(unsigned tmp,
GenTree* val,
@@ -9148,6 +9148,11 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#endif // !TARGET_AMD64
}
+ bool compDoOldStructRetyping()
+ {
+ return JitConfig.JitDoOldStructRetyping();
+ }
+
// Returns true if the method returns a value in more than one return register
// TODO-ARM-Bug: Deal with multi-register genReturnLocaled structs?
// TODO-ARM64: Does this apply for ARM64 too?
diff --git a/src/coreclr/src/jit/compiler.hpp b/src/coreclr/src/jit/compiler.hpp
index 992c7f6c3d120..993c287aea54b 100644
--- a/src/coreclr/src/jit/compiler.hpp
+++ b/src/coreclr/src/jit/compiler.hpp
@@ -702,6 +702,7 @@ inline bool Compiler::VarTypeIsMultiByteAndCanEnreg(
if (varTypeIsStruct(type))
{
+ assert(typeClass != nullptr);
size = info.compCompHnd->getClassSize(typeClass);
if (forReturn)
{
@@ -1807,8 +1808,11 @@ inline void LclVarDsc::incRefCnts(BasicBlock::weight_t weight, Compiler* comp, R
//
// Increment counts on the local itself.
//
- if (lvType != TYP_STRUCT || promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT)
+ if ((lvType != TYP_STRUCT) || (promotionType != Compiler::PROMOTION_TYPE_INDEPENDENT))
{
+ // We increment ref counts of this local for primitive types, including structs that have been retyped as their
+ // only field, as well as for structs whose fields are not independently promoted.
+
//
// Increment lvRefCnt
//
diff --git a/src/coreclr/src/jit/flowgraph.cpp b/src/coreclr/src/jit/flowgraph.cpp
index 689fa7f0606e8..854142856c0f1 100644
--- a/src/coreclr/src/jit/flowgraph.cpp
+++ b/src/coreclr/src/jit/flowgraph.cpp
@@ -6052,7 +6052,14 @@ void Compiler::fgFindBasicBlocks()
{
// The lifetime of this var might expand multiple BBs. So it is a long lifetime compiler temp.
lvaInlineeReturnSpillTemp = lvaGrabTemp(false DEBUGARG("Inline return value spill temp"));
- lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
+ if (compDoOldStructRetyping())
+ {
+ lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetNativeType;
+ }
+ else
+ {
+ lvaTable[lvaInlineeReturnSpillTemp].lvType = info.compRetType;
+ }
// If the method returns a ref class, set the class of the spill temp
// to the method's return value. We may update this later if it turns
@@ -8677,7 +8684,18 @@ class MergedReturns
if (comp->compMethodReturnsNativeScalarType())
{
- returnLocalDsc.lvType = genActualType(comp->info.compRetNativeType);
+ if (!comp->compDoOldStructRetyping())
+ {
+ returnLocalDsc.lvType = genActualType(comp->info.compRetType);
+ if (varTypeIsStruct(returnLocalDsc.lvType))
+ {
+ comp->lvaSetStruct(returnLocalNum, comp->info.compMethodInfo->args.retTypeClass, false);
+ }
+ }
+ else
+ {
+ returnLocalDsc.lvType = genActualType(comp->info.compRetNativeType);
+ }
}
else if (comp->compMethodReturnsRetBufAddr())
{
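The flowgraph.cpp changes above keep the declared struct return type on the inlinee return spill temp and on the merged return local (the merged return local also gets its class handle via lvaSetStruct), instead of retyping them to the ABI's native return type up front. A hedged C# sketch of the kind of callee that needs the spill temp, with hypothetical names and assuming the callee is inlined:

    struct Pair { public int a; public int b; }

    static class InlineExample
    {
        // More than one return block, so when inlined the struct result is spilled
        // into lvaInlineeReturnSpillTemp, which now stays typed as the struct.
        static Pair MakePair(int x)
        {
            if (x > 0)
            {
                return new Pair { a = x };
            }
            return new Pair { b = x };
        }

        static int Caller(int x)
        {
            Pair p = MakePair(x);
            return p.a + p.b;
        }
    }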
diff --git a/src/coreclr/src/jit/gentree.cpp b/src/coreclr/src/jit/gentree.cpp
index 279402fdc4007..8e341ce7dab5a 100644
--- a/src/coreclr/src/jit/gentree.cpp
+++ b/src/coreclr/src/jit/gentree.cpp
@@ -6481,7 +6481,7 @@ GenTree* Compiler::gtArgNodeByLateArgInx(GenTreeCall* call, unsigned lateArgInx)
* Create a node that will assign 'src' to 'dst'.
*/
-GenTree* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
+GenTreeOp* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
{
/* Mark the target as being assigned */
@@ -6498,7 +6498,7 @@ GenTree* Compiler::gtNewAssignNode(GenTree* dst, GenTree* src)
/* Create the assignment node */
- GenTree* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src);
+ GenTreeOp* asg = gtNewOperNode(GT_ASG, dst->TypeGet(), dst, src)->AsOp();
/* Mark the expression as containing an assignment */
@@ -15137,6 +15137,14 @@ GenTree* Compiler::gtNewTempAssign(
// and call returns. Lowering and Codegen will handle these.
ok = true;
}
+ else if ((dstTyp == TYP_STRUCT) && (valTyp == TYP_INT))
+ {
+ // This can come from an `ASG(struct, 0)` that was propagated into `RETURN struct(0)`,
+ // and is now being merged back into the struct return local.
+ assert(!compDoOldStructRetyping());
+ assert(tmp == genReturnLocal);
+ ok = true;
+ }
if (!ok)
{
@@ -15165,15 +15173,34 @@ GenTree* Compiler::gtNewTempAssign(
// struct types. We don't have a convenient way to do that for all SIMD temps, since some
// internal trees use SIMD types that are not used by the input IL. In this case, we allow
// a null type handle and derive the necessary information about the type from its varType.
- CORINFO_CLASS_HANDLE structHnd = gtGetStructHandleIfPresent(val);
- if (varTypeIsStruct(varDsc) && ((structHnd != NO_CLASS_HANDLE) || (varTypeIsSIMD(valTyp))))
+ CORINFO_CLASS_HANDLE valStructHnd = gtGetStructHandleIfPresent(val);
+ if (varTypeIsStruct(varDsc) && (valStructHnd == NO_CLASS_HANDLE) && !varTypeIsSIMD(valTyp))
+ {
+ // There are 2 special cases:
+ // 1. we have lost the class handle from a FIELD node because the parent struct has overlapping fields,
+ // and the field was transformed into an IND or a GT_LCL_FLD;
+ // 2. we are propagating `ASG(struct V01, 0)` to `RETURN(struct V01)`, and the `CNS_INT` has no `structHnd`;
+ // in both cases we can use the type of the merged return local for the assignment.
+ assert(val->OperIs(GT_IND, GT_LCL_FLD, GT_CNS_INT));
+ assert(!compDoOldStructRetyping());
+ assert(tmp == genReturnLocal);
+ valStructHnd = lvaGetStruct(genReturnLocal);
+ assert(valStructHnd != NO_CLASS_HANDLE);
+ }
+
+ if ((valStructHnd != NO_CLASS_HANDLE) && val->IsConstInitVal())
+ {
+ assert(!compDoOldStructRetyping());
+ asg = gtNewAssignNode(dest, val);
+ }
+ else if (varTypeIsStruct(varDsc) && ((valStructHnd != NO_CLASS_HANDLE) || varTypeIsSIMD(valTyp)))
{
// The struct value may be be a child of a GT_COMMA.
GenTree* valx = val->gtEffectiveVal(/*commaOnly*/ true);
- if (structHnd != NO_CLASS_HANDLE)
+ if (valStructHnd != NO_CLASS_HANDLE)
{
- lvaSetStruct(tmp, structHnd, false);
+ lvaSetStruct(tmp, valStructHnd, false);
}
else
{
@@ -15181,7 +15208,7 @@ GenTree* Compiler::gtNewTempAssign(
}
dest->gtFlags |= GTF_DONT_CSE;
valx->gtFlags |= GTF_DONT_CSE;
- asg = impAssignStruct(dest, val, structHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, ilOffset, block);
+ asg = impAssignStruct(dest, val, valStructHnd, (unsigned)CHECK_SPILL_NONE, pAfterStmt, ilOffset, block);
}
else
{
@@ -15189,7 +15216,8 @@ GenTree* Compiler::gtNewTempAssign(
// when the ABI calls for returning a struct as a primitive type.
// TODO-1stClassStructs: When we stop "lying" about the types for ABI purposes, the
// 'genReturnLocal' should be the original struct type.
- assert(!varTypeIsStruct(valTyp) || typGetObjLayout(structHnd)->GetSize() == genTypeSize(varDsc));
+ assert(!varTypeIsStruct(valTyp) || ((valStructHnd != NO_CLASS_HANDLE) &&
+ (typGetObjLayout(valStructHnd)->GetSize() == genTypeSize(varDsc))));
asg = gtNewAssignNode(dest, val);
}
@@ -17245,28 +17273,28 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
}
else
{
- GenTree* addr = tree->AsIndir()->Addr();
+ GenTree* addr = tree->AsIndir()->Addr();
+ FieldSeqNode* fieldSeq = nullptr;
if ((addr->OperGet() == GT_ADD) && addr->gtGetOp2()->OperIs(GT_CNS_INT))
{
- FieldSeqNode* fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
-
- if (fieldSeq != nullptr)
- {
- while (fieldSeq->m_next != nullptr)
- {
- fieldSeq = fieldSeq->m_next;
- }
- if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
- {
- CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
- CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &structHnd);
- assert(fieldCorType == CORINFO_TYPE_VALUECLASS);
- }
- }
+ fieldSeq = addr->gtGetOp2()->AsIntCon()->gtFieldSeq;
}
- else if (addr->OperGet() == GT_LCL_VAR)
+ else
+ {
+ GetZeroOffsetFieldMap()->Lookup(addr, &fieldSeq);
+ }
+ if (fieldSeq != nullptr)
{
- structHnd = gtGetStructHandleIfPresent(addr);
+ while (fieldSeq->m_next != nullptr)
+ {
+ fieldSeq = fieldSeq->m_next;
+ }
+ if (fieldSeq != FieldSeqStore::NotAField() && !fieldSeq->IsPseudoField())
+ {
+ CORINFO_FIELD_HANDLE fieldHnd = fieldSeq->m_fieldHnd;
+ CorInfoType fieldCorType = info.compCompHnd->getFieldType(fieldHnd, &structHnd);
+ assert(fieldCorType == CORINFO_TYPE_VALUECLASS);
+ }
}
}
}
@@ -17290,6 +17318,9 @@ CORINFO_CLASS_HANDLE Compiler::gtGetStructHandleIfPresent(GenTree* tree)
#endif
break;
}
+ // TODO-1stClassStructs: add a check that `structHnd != NO_CLASS_HANDLE`;
+ // currently this does not hold because the right-hand side of an ASG can have a struct type without a handle
+ // (see `fgMorphBlockOperand(isBlkReqd)` and a few other cases).
}
return structHnd;
}
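The gentree.cpp changes above make gtGetStructHandleIfPresent also consult the zero-offset field map, and let gtNewTempAssign recover a handle from the merged return local when the source tree has lost it. A hedged sketch of a pattern where the handle is lost because of overlapping fields (it mirrors StructWithOverlaps/TestNoFieldSeqPropogation in the new test):

    using System.Runtime.InteropServices;

    struct Inner { public float a; }

    [StructLayout(LayoutKind.Explicit, Pack = 1)]
    struct Overlapped
    {
        [FieldOffset(0)] public int val;
        [FieldOffset(0)] public Inner s;
    }

    static class OverlapExample
    {
        // The FIELD access at offset 0 of an overlapping layout is rewritten as an
        // IND/LCL_FLD without a class handle; the assignment into the merged return
        // local then takes its handle from genReturnLocal.
        static Inner ReturnOverlappedField(Overlapped o) => o.s;
    }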
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index c5f01f3e0f9ab..41c5e2540bb17 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -1173,8 +1173,8 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
ilOffset = impCurStmtOffs;
}
- assert(src->OperIs(GT_LCL_VAR, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) ||
- (src->TypeGet() != TYP_STRUCT && (src->OperIsSimdOrHWintrinsic() || src->OperIs(GT_LCL_FLD))));
+ assert(src->OperIs(GT_LCL_VAR, GT_LCL_FLD, GT_FIELD, GT_IND, GT_OBJ, GT_CALL, GT_MKREFANY, GT_RET_EXPR, GT_COMMA) ||
+ (src->TypeGet() != TYP_STRUCT && src->OperIsSimdOrHWintrinsic()));
var_types asgType = src->TypeGet();
@@ -1199,8 +1199,11 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
var_types returnType = (var_types)src->AsCall()->gtReturnType;
- // We won't use a return buffer, so change the type of src->gtType to 'returnType'
- src->gtType = genActualType(returnType);
+ if (compDoOldStructRetyping())
+ {
+ // We're not using a return buffer, so if we're retyping we change the type of 'src' to 'returnType'.
+ src->gtType = genActualType(returnType);
+ }
// First we try to change this to "LclVar/LclFld = call"
//
@@ -1280,13 +1283,18 @@ GenTree* Compiler::impAssignStructPtr(GenTree* destAddr,
else
{
// Case of inline method returning a struct in one or more registers.
- //
- var_types returnType = (var_types)call->gtReturnType;
-
// We won't need a return buffer
- asgType = returnType;
- src->gtType = genActualType(returnType);
- call->gtType = src->gtType;
+ if (compDoOldStructRetyping())
+ {
+ var_types returnType = (var_types)call->gtReturnType;
+ asgType = returnType;
+ src->gtType = genActualType(returnType);
+ call->gtType = src->gtType;
+ }
+ else
+ {
+ asgType = src->gtType;
+ }
if ((destAddr->gtOper != GT_ADDR) || (destAddr->AsOp()->gtOp1->gtOper != GT_LCL_VAR))
{
@@ -8957,6 +8965,10 @@ GenTree* Compiler::impFixupCallStructReturn(GenTreeCall* call, CORINFO_CLASS_HAN
}
else
{
+ if (!compDoOldStructRetyping())
+ {
+ return call;
+ }
assert(returnType != TYP_UNKNOWN);
// See if the struct size is smaller than the return
@@ -9024,6 +9036,12 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
assert(varTypeIsStruct(info.compRetType));
assert(info.compRetBuffArg == BAD_VAR_NUM);
+ if (!compDoOldStructRetyping() && (!op->IsCall() || !op->AsCall()->TreatAsHasRetBufArg(this)))
+ {
+ // Don't retype the `struct` as a primitive type for the `ret` instruction.
+ return op;
+ }
+
JITDUMP("\nimpFixupStructReturnType: retyping\n");
DISPTREE(op);
@@ -9189,10 +9207,20 @@ GenTree* Compiler::impFixupStructReturnType(GenTree* op, CORINFO_CLASS_HANDLE re
// No need to spill anything as we're about to return.
impAssignTempGen(tmpNum, op, info.compMethodInfo->args.retTypeClass, (unsigned)CHECK_SPILL_NONE);
- // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
- // jump directly to a GT_LCL_FLD.
- op = gtNewLclvNode(tmpNum, info.compRetNativeType);
- op->ChangeOper(GT_LCL_FLD);
+ if (compDoOldStructRetyping())
+ {
+ // Don't create both a GT_ADDR & GT_OBJ just to undo all of that; instead,
+ // jump directly to a GT_LCL_FLD.
+ op = gtNewLclvNode(tmpNum, info.compRetNativeType);
+ op->ChangeOper(GT_LCL_FLD);
+ }
+ else
+ {
+ op = gtNewLclvNode(tmpNum, info.compRetType);
+ JITDUMP("\nimpFixupStructReturnType: created a pseudo-return buffer for a special helper\n");
+ DISPTREE(op);
+ return op;
+ }
}
else
{
@@ -15085,10 +15113,16 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op1 = gtNewHelperCallNode(CORINFO_HELP_TYPEHANDLE_TO_RUNTIMETYPEHANDLE_MAYBENULL, TYP_STRUCT,
helperArgs);
+ CORINFO_CLASS_HANDLE classHandle = impGetTypeHandleClass();
+
// The handle struct is returned in register
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
+ if (!compDoOldStructRetyping())
+ {
+ op1->AsCall()->gtRetClsHnd = classHandle;
+ }
- tiRetVal = typeInfo(TI_STRUCT, impGetTypeHandleClass());
+ tiRetVal = typeInfo(TI_STRUCT, classHandle);
}
impPushOnStack(op1, tiRetVal);
@@ -15125,8 +15159,13 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op1 = gtNewHelperCallNode(helper, TYP_STRUCT, helperArgs);
- // The handle struct is returned in register
+ // The handle struct is returned in a register and
+ // can be consumed as either `TYP_STRUCT` or `TYP_REF`.
op1->AsCall()->gtReturnType = GetRuntimeHandleUnderlyingType();
+ if (!compDoOldStructRetyping())
+ {
+ op1->AsCall()->gtRetClsHnd = tokenType;
+ }
tiRetVal = verMakeTypeInfo(tokenType);
impPushOnStack(op1, tiRetVal);
@@ -15303,6 +15342,13 @@ void Compiler::impImportBlockCode(BasicBlock* block)
op1 = gtNewHelperCallNode(helper,
(var_types)((helper == CORINFO_HELP_UNBOX) ? TYP_BYREF : TYP_STRUCT),
gtNewCallArgs(op2, op1));
+ if (!compDoOldStructRetyping())
+ {
+ if (op1->gtType == TYP_STRUCT)
+ {
+ op1->AsCall()->gtRetClsHnd = resolvedToken.hClass;
+ }
+ }
}
assert(helper == CORINFO_HELP_UNBOX && op1->gtType == TYP_BYREF || // Unbox helper returns a byref.
@@ -16479,11 +16525,14 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
// node type. During morphing, the GT_CALL will get the correct, final, native return type.
bool restoreType = false;
- if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
+ if (compDoOldStructRetyping())
{
- noway_assert(op2->TypeGet() == TYP_STRUCT);
- op2->gtType = info.compRetNativeType;
- restoreType = true;
+ if ((op2->OperGet() == GT_CALL) && (info.compRetType == TYP_STRUCT))
+ {
+ noway_assert(op2->TypeGet() == TYP_STRUCT);
+ op2->gtType = info.compRetNativeType;
+ restoreType = true;
+ }
}
impAssignTempGen(lvaInlineeReturnSpillTemp, op2, se.seTypeInfo.GetClassHandle(),
@@ -16491,9 +16540,12 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
GenTree* tmpOp2 = gtNewLclvNode(lvaInlineeReturnSpillTemp, op2->TypeGet());
- if (restoreType)
+ if (compDoOldStructRetyping())
{
- op2->gtType = TYP_STRUCT; // restore it to what it was
+ if (restoreType)
+ {
+ op2->gtType = TYP_STRUCT; // restore it to what it was
+ }
}
op2 = tmpOp2;
@@ -16720,7 +16772,16 @@ bool Compiler::impReturnInstruction(int prefixFlags, OPCODE& opcode)
#endif
op2 = impFixupStructReturnType(op2, retClsHnd);
// return op2
- op1 = gtNewOperNode(GT_RETURN, genActualType(info.compRetNativeType), op2);
+ var_types returnType;
+ if (compDoOldStructRetyping())
+ {
+ returnType = info.compRetNativeType;
+ }
+ else
+ {
+ returnType = info.compRetType;
+ }
+ op1 = gtNewOperNode(GT_RETURN, genActualType(returnType), op2);
}
else
{
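With the importer.cpp changes above, when compDoOldStructRetyping() is false, struct-returning calls and the GT_RETURN node keep their struct type at import time and the ABI retyping is deferred to lowering. A minimal hedged sketch, with hypothetical names, of a method whose return now stays TYP_STRUCT through the importer on single-register-return targets:

    struct SingleFloat { public float a; }

    static class ImportExample
    {
        // Previously imported as RETURN(float) via impFixupStructReturnType;
        // now imported as RETURN(struct) and retyped later in lowering.
        static SingleFloat Identity(SingleFloat s) => s;
    }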
diff --git a/src/coreclr/src/jit/jitconfigvalues.h b/src/coreclr/src/jit/jitconfigvalues.h
index ae91156a4a299..cb86931dfaa7a 100644
--- a/src/coreclr/src/jit/jitconfigvalues.h
+++ b/src/coreclr/src/jit/jitconfigvalues.h
@@ -437,6 +437,14 @@ CONFIG_INTEGER(JitSaveFpLrWithCalleeSavedRegisters, W("JitSaveFpLrWithCalleeSave
#endif // defined(TARGET_ARM64)
#endif // DEBUG
+#if !FEATURE_MULTIREG_RET
+CONFIG_INTEGER(JitDoOldStructRetyping, W("JitDoOldStructRetyping"), 1) // Allow Jit to retype structs as primitive types
+ // when possible.
+#else // FEATURE_MULTIREG_RET
+CONFIG_INTEGER(JitDoOldStructRetyping, W("JitDoOldStructRetyping"), 1) // Allow Jit to retype structs as primitive types
+ // when possible.
+#endif // FEATURE_MULTIREG_RET
+
#undef CONFIG_INTEGER
#undef CONFIG_STRING
#undef CONFIG_METHODSET
diff --git a/src/coreclr/src/jit/lower.cpp b/src/coreclr/src/jit/lower.cpp
index 3a4a6603ab7c5..9389b21325f35 100644
--- a/src/coreclr/src/jit/lower.cpp
+++ b/src/coreclr/src/jit/lower.cpp
@@ -112,8 +112,8 @@ GenTree* Lowering::LowerNode(GenTree* node)
switch (node->gtOper)
{
case GT_IND:
- // Leave struct typed indirs alone, they only appear as the source of
- // block copy operations and LowerBlockStore will handle those.
+ // Process struct typed indirs separately; they only appear as the source of
+ // a block copy operation or a return node.
if (node->TypeGet() != TYP_STRUCT)
{
// TODO-Cleanup: We're passing isContainable = true but ContainCheckIndir rejects
@@ -328,7 +328,7 @@ GenTree* Lowering::LowerNode(GenTree* node)
}
else
#endif // FEATURE_MULTIREG_RET
- if (!src->OperIs(GT_LCL_VAR) || varDsc->GetLayout()->GetRegisterType() == TYP_UNDEF)
+ if (!src->OperIs(GT_LCL_VAR, GT_CALL) || varDsc->GetLayout()->GetRegisterType() == TYP_UNDEF)
{
GenTreeLclVar* addr = comp->gtNewLclVarAddrNode(store->GetLclNum(), TYP_BYREF);
@@ -1683,6 +1683,14 @@ void Lowering::LowerCall(GenTree* node)
LowerFastTailCall(call);
}
+#if !FEATURE_MULTIREG_RET
+ if (varTypeIsStruct(call))
+ {
+ assert(!comp->compDoOldStructRetyping());
+ LowerCallStruct(call);
+ }
+#endif // !FEATURE_MULTIREG_RET
+
ContainCheckCallOperands(call);
JITDUMP("lowering call (after):\n");
DISPTREERANGE(BlockRange(), call);
@@ -2970,14 +2978,37 @@ void Lowering::LowerRet(GenTreeUnOp* ret)
JITDUMP("============");
GenTree* op1 = ret->gtGetOp1();
- if ((ret->TypeGet() != TYP_VOID) && (ret->TypeGet() != TYP_STRUCT) &&
+ if ((ret->TypeGet() != TYP_VOID) && !varTypeIsStruct(ret) &&
(varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(ret->gtGetOp1())))
{
+ assert(comp->compDoOldStructRetyping());
GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, ret->TypeGet(), ret->gtGetOp1(), nullptr);
ret->gtOp1 = bitcast;
BlockRange().InsertBefore(ret, bitcast);
ContainCheckBitCast(bitcast);
}
+#if !FEATURE_MULTIREG_RET
+ else
+ {
+#ifdef DEBUG
+ if (ret->TypeGet() != TYP_VOID)
+ {
+ GenTree* retVal = ret->gtGetOp1();
+ if (varTypeIsStruct(ret->TypeGet()) != varTypeIsStruct(retVal->TypeGet()))
+ {
+ // This could happen if we have retyped op1 as a primitive type during struct promotion,
+ // check `retypedFieldsMap` for details.
+ assert(genActualType(comp->info.compRetNativeType) == genActualType(retVal->TypeGet()));
+ }
+ }
+#endif
+ if (varTypeIsStruct(ret))
+ {
+ assert(!comp->compDoOldStructRetyping());
+ LowerRetStruct(ret);
+ }
+ }
+#endif // !FEATURE_MULTIREG_RET
// Method doing PInvokes has exactly one return block unless it has tail calls.
if (comp->compMethodRequiresPInvokeFrame() && (comp->compCurBB == comp->genReturnBB))
@@ -2987,6 +3018,174 @@ void Lowering::LowerRet(GenTreeUnOp* ret)
ContainCheckRet(ret);
}
+#if !FEATURE_MULTIREG_RET
+//----------------------------------------------------------------------------------------------
+// LowerRetStruct: Lowers a struct return node.
+//
+// Arguments:
+// ret - The return node to lower.
+//
+void Lowering::LowerRetStruct(GenTreeUnOp* ret)
+{
+ assert(!comp->compDoOldStructRetyping());
+ assert(ret->OperIs(GT_RETURN));
+ assert(varTypeIsStruct(ret));
+
+ GenTree* retVal = ret->gtGetOp1();
+ // Note: small types are returned as INT.
+ var_types nativeReturnType = genActualType(comp->info.compRetNativeType);
+ ret->ChangeType(nativeReturnType);
+
+ switch (retVal->OperGet())
+ {
+ case GT_CALL:
+ assert(retVal->TypeIs(nativeReturnType)); // Type should be changed during call processing.
+ break;
+
+ case GT_CNS_INT:
+ assert(retVal->TypeIs(TYP_INT));
+ break;
+
+ case GT_OBJ:
+ retVal->ChangeOper(GT_IND);
+ __fallthrough;
+ case GT_IND:
+ retVal->ChangeType(nativeReturnType);
+ break;
+
+ case GT_LCL_VAR:
+ LowerRetStructLclVar(ret);
+ break;
+
+ case GT_SIMD:
+ case GT_LCL_FLD:
+ {
+ GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, ret->TypeGet(), retVal, nullptr);
+ ret->gtOp1 = bitcast;
+ BlockRange().InsertBefore(ret, bitcast);
+ ContainCheckBitCast(bitcast);
+ break;
+ }
+ default:
+ unreached();
+ }
+}
+
+//----------------------------------------------------------------------------------------------
+// LowerRetStructLclVar: Lowers a return node with a struct lclVar as a source.
+//
+// Arguments:
+// ret - The return node to lower.
+//
+void Lowering::LowerRetStructLclVar(GenTreeUnOp* ret)
+{
+ assert(!comp->compDoOldStructRetyping());
+ assert(ret->OperIs(GT_RETURN));
+ GenTreeLclVar* lclVar = ret->gtGetOp1()->AsLclVar();
+ unsigned lclNum = lclVar->GetLclNum();
+ LclVarDsc* varDsc = comp->lvaGetDesc(lclNum);
+
+#ifdef DEBUG
+ if (comp->gtGetStructHandleIfPresent(lclVar) == NO_CLASS_HANDLE)
+ {
+ // a promoted struct field was retyped as its only field.
+ assert(varDsc->lvIsStructField);
+ }
+#endif
+ if (varDsc->lvPromoted && (comp->lvaGetPromotionType(lclNum) == Compiler::PROMOTION_TYPE_INDEPENDENT))
+ {
+ bool canEnregister = false;
+ if (varDsc->lvFieldCnt == 1)
+ {
+ // We have to replace it with its field.
+ assert(varDsc->lvRefCnt() == 0);
+ unsigned fieldLclNum = varDsc->lvFieldLclStart;
+ LclVarDsc* fieldDsc = comp->lvaGetDesc(fieldLclNum);
+ if (fieldDsc->lvFldOffset == 0)
+ {
+ lclVar->SetLclNum(fieldLclNum);
+ JITDUMP("Replacing an independently promoted local var with its only field for the return %u, %u\n",
+ lclNum, fieldLclNum);
+ lclVar->ChangeType(fieldDsc->lvType);
+ canEnregister = true;
+ }
+ }
+ if (!canEnregister)
+ {
+ // TODO-1stClassStructs: We can no longer promote or enregister this struct,
+ // since it is referenced as a whole.
+ comp->lvaSetVarDoNotEnregister(lclNum DEBUGARG(Compiler::DNER_VMNeedsStackAddr));
+ }
+ }
+
+ var_types regType = varDsc->GetRegisterType(lclVar);
+ assert((regType != TYP_STRUCT) && (regType != TYP_UNKNOWN));
+ // Note: regType could be a small type, in this case return will generate an extension move.
+ lclVar->ChangeType(regType);
+ if (varTypeUsesFloatReg(ret) != varTypeUsesFloatReg(lclVar))
+ {
+ GenTreeUnOp* bitcast = new (comp, GT_BITCAST) GenTreeOp(GT_BITCAST, ret->TypeGet(), lclVar, nullptr);
+ ret->gtOp1 = bitcast;
+ BlockRange().InsertBefore(ret, bitcast);
+ ContainCheckBitCast(bitcast);
+ }
+}
+
+//----------------------------------------------------------------------------------------------
+// LowerCallStruct: Lowers a call node that returns a struct.
+//
+// Arguments:
+// call - The call node to lower.
+//
+// Note: it transforms the call's user.
+//
+void Lowering::LowerCallStruct(GenTreeCall* call)
+{
+ assert(!comp->compDoOldStructRetyping());
+ CORINFO_CLASS_HANDLE retClsHnd = call->gtRetClsHnd;
+ Compiler::structPassingKind howToReturnStruct;
+ var_types returnType = comp->getReturnTypeForStruct(retClsHnd, &howToReturnStruct);
+ assert(!varTypeIsStruct(returnType) && returnType != TYP_UNKNOWN);
+ call->gtType = genActualType(returnType); // the callee normalizes small return types.
+
+ LIR::Use callUse;
+ if (BlockRange().TryGetUse(call, &callUse))
+ {
+ GenTree* user = callUse.User();
+ switch (user->OperGet())
+ {
+ case GT_RETURN:
+ case GT_STORE_LCL_VAR:
+ // Leave as is, the user will handle it.
+ break;
+
+ case GT_STORE_BLK:
+ case GT_STORE_OBJ:
+ {
+ GenTreeBlk* storeBlk = user->AsBlk();
+#ifdef DEBUG
+ unsigned storeSize = storeBlk->GetLayout()->GetSize();
+ assert(storeSize == genTypeSize(returnType));
+#endif // DEBUG
+ // For `STORE_BLK<2>(dst, call struct<2>)` the call will have been retyped to `int`,
+ // but the store has to use the unwidened type.
+ user->ChangeType(returnType);
+ user->SetOper(GT_STOREIND);
+ }
+ break;
+
+ case GT_STOREIND:
+ assert(user->TypeIs(TYP_SIMD8, TYP_REF));
+ user->ChangeType(returnType);
+ break;
+
+ default:
+ unreached();
+ }
+ }
+}
+#endif // !FEATURE_MULTIREG_RET
+
GenTree* Lowering::LowerDirectCall(GenTreeCall* call)
{
noway_assert(call->gtCallType == CT_USER_FUNC || call->gtCallType == CT_HELPER);
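On targets without multi-register struct returns, the new LowerRetStruct/LowerCallStruct above perform the retyping that the importer no longer does, and LowerCallStruct also fixes up the call's user. A hedged C# sketch, with hypothetical names, of IR that exercises the GT_STORE_OBJ path (requires /unsafe):

    struct S8 { public long a; }

    static unsafe class LowerExample
    {
        static S8 Make() => new S8 { a = 42 };

        // `*p = Make()` is a block store whose source is the struct-returning call;
        // LowerCallStruct retypes the call to the primitive return-register type and
        // converts the store into a STOREIND of that type.
        static void StoreThroughPointer(S8* p) => *p = Make();
    }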
diff --git a/src/coreclr/src/jit/lower.h b/src/coreclr/src/jit/lower.h
index 7608c08f91da5..9c79ede16869a 100644
--- a/src/coreclr/src/jit/lower.h
+++ b/src/coreclr/src/jit/lower.h
@@ -132,6 +132,11 @@ class Lowering final : public Phase
GenTreeCC* LowerNodeCC(GenTree* node, GenCondition condition);
void LowerJmpMethod(GenTree* jmp);
void LowerRet(GenTreeUnOp* ret);
+#if !FEATURE_MULTIREG_RET
+ void LowerRetStruct(GenTreeUnOp* ret);
+ void LowerRetStructLclVar(GenTreeUnOp* ret);
+ void LowerCallStruct(GenTreeCall* call);
+#endif
GenTree* LowerDelegateInvoke(GenTreeCall* call);
GenTree* LowerIndirectNonvirtCall(GenTreeCall* call);
GenTree* LowerDirectCall(GenTreeCall* call);
diff --git a/src/coreclr/src/jit/morph.cpp b/src/coreclr/src/jit/morph.cpp
index 69b3eaf80b0df..ab11d827f60d9 100644
--- a/src/coreclr/src/jit/morph.cpp
+++ b/src/coreclr/src/jit/morph.cpp
@@ -4980,6 +4980,10 @@ void Compiler::fgAddSkippedRegsInPromotedStructArg(LclVarDsc* varDsc,
//
void Compiler::fgFixupStructReturn(GenTree* callNode)
{
+ if (!compDoOldStructRetyping())
+ {
+ return;
+ }
assert(varTypeIsStruct(callNode));
GenTreeCall* call = callNode->AsCall();
@@ -6673,7 +6677,8 @@ bool Compiler::fgCanFastTailCall(GenTreeCall* callee, const char** failReason)
#ifdef DEBUG
if (callee->IsTailPrefixedCall())
{
- assert(impTailCallRetTypeCompatible(info.compRetNativeType, info.compMethodInfo->args.retTypeClass,
+ var_types retType = (compDoOldStructRetyping() ? info.compRetNativeType : info.compRetType);
+ assert(impTailCallRetTypeCompatible(retType, info.compMethodInfo->args.retTypeClass,
(var_types)callee->gtReturnType, callee->gtRetClsHnd));
}
#endif
@@ -7596,6 +7601,9 @@ GenTree* Compiler::fgMorphPotentialTailCall(GenTreeCall* call)
{
callType = origCallType;
}
+ assert((callType != TYP_UNKNOWN) && !varTypeIsStruct(callType));
+ callType = genActualType(callType);
+
GenTree* zero = gtNewZeroConNode(callType);
result = fgMorphTree(zero);
}
@@ -9313,6 +9321,12 @@ GenTree* Compiler::fgMorphOneAsgBlockOp(GenTree* tree)
return nullptr;
}
+ if (src->IsCall() || src->OperIsSIMD())
+ {
+ // Can't take the ADDR of these nodes; let fgMorphCopyBlock handle them, see #11413.
+ return nullptr;
+ }
+
if ((destVarDsc != nullptr) && !varTypeIsStruct(destVarDsc->TypeGet()))
{
@@ -9659,12 +9673,6 @@ GenTree* Compiler::fgMorphInitBlock(GenTree* tree)
tree->AsOp()->gtOp1 = dest;
}
tree->gtType = dest->TypeGet();
- // (Constant propagation may cause a TYP_STRUCT lclVar to be changed to GT_CNS_INT, and its
- // type will be the type of the original lclVar, in which case we will change it to TYP_INT).
- if ((src->OperGet() == GT_CNS_INT) && varTypeIsStruct(src))
- {
- src->gtType = TYP_INT;
- }
JITDUMP("\nfgMorphInitBlock:");
GenTree* oneAsgTree = fgMorphOneAsgBlockOp(tree);
@@ -9998,7 +10006,10 @@ GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE cl
// TODO: Consider using lvaGrabTemp and gtNewTempAssign instead, since we're
// not going to use "temp"
GenTree* temp = fgInsertCommaFormTemp(pTree, clsHnd);
- addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue);
+ assert(!compDoOldStructRetyping());
+ unsigned lclNum = temp->gtEffectiveVal()->AsLclVar()->GetLclNum();
+ lvaSetVarDoNotEnregister(lclNum DEBUG_ARG(DNER_VMNeedsStackAddr));
+ addr = fgMorphGetStructAddr(pTree, clsHnd, isRValue);
break;
}
}
@@ -10020,6 +10031,8 @@ GenTree* Compiler::fgMorphGetStructAddr(GenTree** pTree, CORINFO_CLASS_HANDLE cl
GenTree* Compiler::fgMorphBlkNode(GenTree* tree, bool isDest)
{
+ JITDUMP("fgMorphBlkNode for %s tree, before:\n", (isDest ? "dst" : "src"));
+ DISPTREE(tree);
GenTree* handleTree = nullptr;
GenTree* addr = nullptr;
if (tree->OperIs(GT_COMMA))
@@ -10094,6 +10107,8 @@ GenTree* Compiler::fgMorphBlkNode(GenTree* tree, bool isDest)
if (!tree->OperIsBlk())
{
+ JITDUMP("fgMorphBlkNode after:\n");
+ DISPTREE(tree);
return tree;
}
GenTreeBlk* blkNode = tree->AsBlk();
@@ -10112,24 +10127,31 @@ GenTree* Compiler::fgMorphBlkNode(GenTree* tree, bool isDest)
}
else
{
- return tree;
+ JITDUMP("fgMorphBlkNode after, DYN_BLK with zero size can't be morphed:\n");
+ DISPTREE(blkNode);
+ return blkNode;
}
}
else
{
- return tree;
+ JITDUMP("fgMorphBlkNode after, DYN_BLK with non-const size can't be morphed:\n");
+ DISPTREE(blkNode);
+ return blkNode;
}
}
- if ((blkNode->TypeGet() != TYP_STRUCT) && (blkNode->Addr()->OperGet() == GT_ADDR) &&
- (blkNode->Addr()->gtGetOp1()->OperGet() == GT_LCL_VAR))
+ GenTree* blkSrc = blkNode->Addr();
+ assert(blkSrc != nullptr);
+ if (!blkNode->TypeIs(TYP_STRUCT) && blkSrc->OperIs(GT_ADDR) && blkSrc->gtGetOp1()->OperIs(GT_LCL_VAR))
{
- GenTreeLclVarCommon* lclVarNode = blkNode->Addr()->gtGetOp1()->AsLclVarCommon();
+ GenTreeLclVarCommon* lclVarNode = blkSrc->gtGetOp1()->AsLclVarCommon();
if ((genTypeSize(blkNode) != genTypeSize(lclVarNode)) || (!isDest && !varTypeIsStruct(lclVarNode)))
{
lvaSetVarDoNotEnregister(lclVarNode->GetLclNum() DEBUG_ARG(DNER_VMNeedsStackAddr));
}
}
+ JITDUMP("fgMorphBlkNode after:\n");
+ DISPTREE(tree);
return tree;
}
@@ -10199,6 +10221,16 @@ GenTree* Compiler::fgMorphBlockOperand(GenTree* tree, var_types asgType, unsigne
{
lclNode = effectiveVal->AsLclVarCommon();
}
+ else if (effectiveVal->IsCall())
+ {
+ needsIndirection = false;
+#ifdef DEBUG
+ GenTreeCall* call = effectiveVal->AsCall();
+ assert(call->TypeGet() == TYP_STRUCT);
+ assert(blockWidth == info.compCompHnd->getClassSize(call->gtRetClsHnd));
+#endif
+ }
+
if (lclNode != nullptr)
{
LclVarDsc* varDsc = &(lvaTable[lclNode->GetLclNum()]);
@@ -10287,13 +10319,13 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree)
{
noway_assert(tree->OperIsCopyBlkOp());
- JITDUMP("\nfgMorphCopyBlock:");
+ JITDUMP("fgMorphCopyBlock:\n");
bool isLateArg = (tree->gtFlags & GTF_LATE_ARG) != 0;
- GenTree* asg = tree;
- GenTree* src = asg->gtGetOp2();
- GenTree* dest = asg->gtGetOp1();
+ GenTreeOp* asg = tree->AsOp();
+ GenTree* src = asg->gtGetOp2();
+ GenTree* dest = asg->gtGetOp1();
#if FEATURE_MULTIREG_RET
// If this is a multi-reg return, we will not do any morphing of this node.
@@ -10310,16 +10342,22 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree)
dest = fgMorphBlkNode(dest, true);
if (dest != asg->gtGetOp1())
{
- asg->AsOp()->gtOp1 = dest;
+ asg->gtOp1 = dest;
if (dest->IsLocal())
{
dest->gtFlags |= GTF_VAR_DEF;
}
}
- asg->gtType = dest->TypeGet();
- src = fgMorphBlkNode(src, false);
+#ifdef DEBUG
+ if (asg->TypeGet() != dest->TypeGet())
+ {
+ JITDUMP("changing type of dest from %-6s to %-6s\n", varTypeName(asg->TypeGet()), varTypeName(dest->TypeGet()));
+ }
+#endif
+ asg->ChangeType(dest->TypeGet());
+ src = fgMorphBlkNode(src, false);
- asg->AsOp()->gtOp2 = src;
+ asg->gtOp2 = src;
GenTree* oldTree = tree;
GenTree* oneAsgTree = fgMorphOneAsgBlockOp(tree);
@@ -11071,15 +11109,13 @@ GenTree* Compiler::fgMorphCopyBlock(GenTree* tree)
{
tree->gtDebugFlags |= GTF_DEBUG_NODE_MORPHED;
}
-
- if (verbose)
- {
- printf("\nfgMorphCopyBlock (after):\n");
- gtDispTree(tree);
- }
#endif
_Done:
+
+ JITDUMP("\nfgMorphCopyBlock (after):\n");
+ DISPTREE(tree);
+
return tree;
}
@@ -11876,6 +11912,31 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
return tree;
}
+ if (tree->TypeIs(TYP_STRUCT) && op1->OperIs(GT_OBJ, GT_BLK))
+ {
+ assert(!compDoOldStructRetyping());
+ GenTree* addr = op1->AsBlk()->Addr();
+ // If we return an `OBJ` or `BLK` of a local var, the local var has to have a stack address.
+ if (addr->OperIs(GT_ADDR) && addr->gtGetOp1()->OperIs(GT_LCL_VAR))
+ {
+ GenTreeLclVar* lclVar = addr->gtGetOp1()->AsLclVar();
+ assert(!gtIsActiveCSE_Candidate(addr) && !gtIsActiveCSE_Candidate(op1));
+ if (gtGetStructHandle(tree) == gtGetStructHandleIfPresent(lclVar))
+ {
+ // Fold *(&x).
+ tree->AsUnOp()->gtOp1 = op1;
+ DEBUG_DESTROY_NODE(op1);
+ DEBUG_DESTROY_NODE(addr);
+ op1 = lclVar;
+ }
+ else
+ {
+ // TODO-1stClassStructs: this is not really an address exposure or a block operation,
+ // but the current IR cannot express this reinterpretation without going through the stack; see #11413.
+ lvaSetVarDoNotEnregister(lclVar->GetLclNum() DEBUGARG(DNER_BlockOp));
+ }
+ }
+ }
break;
case GT_EQ:
@@ -13313,7 +13374,9 @@ GenTree* Compiler::fgMorphSmpOp(GenTree* tree, MorphAddrContext* mac)
// is a local or clsVar, even if it has been address-exposed.
if (op1->OperGet() == GT_ADDR)
{
- tree->gtFlags |= (op1->gtGetOp1()->gtFlags & GTF_GLOB_REF);
+ GenTreeUnOp* addr = op1->AsUnOp();
+ GenTree* addrOp = addr->gtGetOp1();
+ tree->gtFlags |= (addrOp->gtFlags & GTF_GLOB_REF);
}
break;
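The new GT_RETURN handling in fgMorphSmpOp above folds *(&lcl) when a struct local is returned through OBJ/ADDR with a matching class handle, and otherwise keeps the local addressable on the stack. This is the shape produced by Unsafe.As-style reinterpretation, as in the ReturnAs* helpers of the new structreturn.cs test; a hedged sketch:

    using System.Runtime.CompilerServices;

    struct E8 { public long a; }

    static class MorphExample
    {
        // Same struct handle on both sides: the OBJ(ADDR(lcl)) folds to the LCL_VAR.
        static E8 SameType(E8 a) => Unsafe.As<E8, E8>(ref a);

        // Source local is not a struct, so the handles differ: the reinterpretation
        // cannot yet be expressed without the stack and the local is marked
        // do-not-enregister (see #11413).
        static E8 FromLong(long a) => Unsafe.As<long, E8>(ref a);
    }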
diff --git a/src/coreclr/tests/src/JIT/Directed/StructABI/StructWithOverlappingFields.cs b/src/coreclr/tests/src/JIT/Directed/StructABI/StructWithOverlappingFields.cs
new file mode 100644
index 0000000000000..d6d9d34ed477a
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/Directed/StructABI/StructWithOverlappingFields.cs
@@ -0,0 +1,129 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// This test originally showed incorrect VN for different fields with the same offset.
+
+using System;
+using System.Diagnostics;
+using System.Runtime.InteropServices;
+
+namespace Opt_Error
+{
+ [StructLayout(LayoutKind.Explicit, Pack = 1)]
+ public class FourByteClass
+ {
+ [FieldOffset(0)]
+ public int val;
+ [FieldOffset(0)]
+ public uint uval;
+ [FieldOffset(0)]
+ public float fval;
+ [FieldOffset(0)]
+ public byte b0;
+ [FieldOffset(1)]
+ public byte b1;
+ [FieldOffset(2)]
+ public byte b2;
+ [FieldOffset(3)]
+ public byte b3;
+
+ public FourByteClass(int val)
+ {
+ this.val = val;
+ }
+ }
+
+ [StructLayout(LayoutKind.Explicit, Pack = 1)]
+ public struct FourByteStruct
+ {
+ [FieldOffset(0)]
+ public int val;
+ [FieldOffset(0)]
+ public uint uval;
+ [FieldOffset(0)]
+ public float fval;
+ [FieldOffset(0)]
+ public byte b0;
+ [FieldOffset(1)]
+ public byte b1;
+ [FieldOffset(2)]
+ public byte b2;
+ [FieldOffset(3)]
+ public byte b3;
+
+ public FourByteStruct(int val)
+ {
+ this.val = 0;
+ uval = 0;
+ fval = 0;
+ b0 = 0;
+ b1 = 0;
+ b2 = 0;
+ b3 = 0;
+ this.val = val;
+ }
+ }
+
+ class Program
+ {
+ static void TestClass(int initVal)
+ {
+ FourByteClass fb = new FourByteClass(initVal);
+ fb.fval = 0;
+ fb.b0 = 1;
+ fb.uval = 2;
+
+ int cseb0_1 = fb.b0 * 5 + 3;
+ uint cse_uval_1 = fb.uval * 2 - 5 + fb.uval;
+ int cse_val_1 = fb.val * 7 - 4 + fb.val * 7;
+
+ Console.WriteLine("First result: " + cseb0_1 + ", " + cse_uval_1 + ", " + cse_val_1 + ";");
+ Debug.Assert(cseb0_1 == 13);
+ Debug.Assert(cse_uval_1 == 1);
+ Debug.Assert(cse_val_1 == 24);
+ fb.val = 4;
+ int cseb0_2 = fb.b0 * 5 + 3;
+ uint cse_uval_2 = fb.uval * 2 - 5 + fb.uval;
+ int cse_val_2 = fb.val * 7 - 4 + fb.val * 7;
+
+ Console.WriteLine("Second result: " + cseb0_2 + ", " + cse_uval_2 + ", " + cse_val_2 + ";");
+ Debug.Assert(cseb0_2 == 23);
+ Debug.Assert(cse_uval_2 == 7);
+ Debug.Assert(cse_val_2 == 52);
+ }
+
+ static void TestStruct(int initVal)
+ {
+ FourByteStruct fb = new FourByteStruct(initVal);
+ fb.fval = 0;
+ fb.b0 = 1;
+ fb.uval = 2;
+
+ int cseb0_1 = fb.b0 * 5 + 3;
+ uint cse_uval_1 = fb.uval * 2 - 5 + fb.uval;
+ int cse_val_1 = fb.val * 7 - 4 + fb.val * 7;
+
+ Console.WriteLine("First result: " + cseb0_1 + ", " + cse_uval_1 + ", " + cse_val_1 + ";");
+ Debug.Assert(cseb0_1 == 13);
+ Debug.Assert(cse_uval_1 == 1);
+ Debug.Assert(cse_val_1 == 24);
+ fb.val = 4;
+ int cseb0_2 = fb.b0 * 5 + 3;
+ uint cse_uval_2 = fb.uval * 2 - 5 + fb.uval;
+ int cse_val_2 = fb.val * 7 - 4 + fb.val * 7;
+
+ Console.WriteLine("Second result: " + cseb0_2 + ", " + cse_uval_2 + ", " + cse_val_2 + ";");
+ Debug.Assert(cseb0_2 == 23);
+ Debug.Assert(cse_uval_2 == 7);
+ Debug.Assert(cse_val_2 == 52);
+ }
+
+ static int Main(string[] args)
+ {
+ TestClass(2);
+ TestStruct(2);
+ return 100;
+ }
+ }
+}
diff --git a/src/coreclr/tests/src/JIT/Directed/StructABI/StructWithOverlappingFields.csproj b/src/coreclr/tests/src/JIT/Directed/StructABI/StructWithOverlappingFields.csproj
new file mode 100644
index 0000000000000..40fbb16bdf90f
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/Directed/StructABI/StructWithOverlappingFields.csproj
@@ -0,0 +1,13 @@
+<Project Sdk="Microsoft.NET.Sdk">
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <CLRTestPriority>1</CLRTestPriority>
+  </PropertyGroup>
+  <PropertyGroup>
+    <DebugType>PdbOnly</DebugType>
+    <Optimize>True</Optimize>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="StructWithOverlappingFields.cs" />
+  </ItemGroup>
+</Project>
diff --git a/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.cs b/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.cs
new file mode 100644
index 0000000000000..e63b9b27f843f
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.cs
@@ -0,0 +1,939 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+// Test register struct returns and local vars retyping cases.
+
+using System;
+using System.Numerics;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+using System.Runtime.InteropServices;
+
+#region Test struct return optimizations.
+class TestStructReturns
+{
+ struct LessNativeInt
+ {
+ public bool a;
+ public bool b;
+ }
+
+ struct NativeIntOneField
+ {
+ public long a;
+ }
+
+ struct NativeIntTwoFields
+ {
+ public int a;
+ public int b;
+ }
+
+ struct NativeIntFloatField
+ {
+ public double a;
+ }
+
+ struct NativeIntMixedFields
+ {
+ public int a;
+ public float b;
+ }
+
+ static LessNativeInt TestLessNativeIntReturnBlockInit()
+ {
+ LessNativeInt a = new LessNativeInt();
+ return a;
+ }
+
+ static LessNativeInt TestLessNativeIntReturnFieldInit()
+ {
+ LessNativeInt a;
+ a.a = false;
+ a.b = true;
+ return a;
+ }
+
+
+ static NativeIntOneField TestNativeIntOneFieldReturnBlockInit()
+ {
+ NativeIntOneField a = new NativeIntOneField();
+ return a;
+ }
+
+ static NativeIntOneField TestNativeIntOneFieldReturnFieldInit()
+ {
+ NativeIntOneField a;
+ a.a = 100;
+ return a;
+ }
+
+ static NativeIntTwoFields TestNativeIntTwoFieldsReturnBlockInit()
+ {
+ NativeIntTwoFields a = new NativeIntTwoFields();
+ return a;
+ }
+
+ static NativeIntTwoFields TestNativeIntTwoFieldsReturnFieldInit()
+ {
+ NativeIntTwoFields a;
+ a.a = 100;
+ a.b = 10;
+ return a;
+ }
+
+ static NativeIntFloatField TestNativeIntFloatFieldReturnBlockInit()
+ {
+ NativeIntFloatField a = new NativeIntFloatField();
+ return a;
+ }
+
+ static NativeIntFloatField TestNativeIntFloatFieldReturnFieldInit()
+ {
+ NativeIntFloatField a;
+ a.a = 100;
+ return a;
+ }
+
+ static NativeIntMixedFields TestNativeIntMixedFieldsReturnBlockInit()
+ {
+ NativeIntMixedFields a = new NativeIntMixedFields();
+ return a;
+ }
+
+ static NativeIntMixedFields TestNativeIntMixedFieldsReturnFieldInit()
+ {
+ NativeIntMixedFields a;
+ a.a = 100;
+ a.b = 10;
+ return a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestLessNativeIntCall1()
+ {
+ int res = 0;
+ var v = TestLessNativeIntReturnBlockInit();
+ if (v.a)
+ {
+ res++;
+ }
+ if (v.b)
+ {
+ res++;
+ }
+ if (v.a && v.b)
+ {
+ res++;
+ }
+ if (!v.a && !v.b)
+ {
+ res--;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestLessNativeIntCall2()
+ {
+ int res = 0;
+ var v = TestLessNativeIntReturnFieldInit();
+ if (v.a)
+ {
+ res++;
+ }
+ if (v.b)
+ {
+ res++;
+ }
+ if (v.a && v.b)
+ {
+ res++;
+ }
+ if (!v.a && !v.b)
+ {
+ res--;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntOneFieldCall1()
+ {
+ int res = 0;
+ var v = TestNativeIntOneFieldReturnBlockInit();
+ if (v.a == 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntOneFieldCall2()
+ {
+ int res = 0;
+ var v = TestNativeIntOneFieldReturnFieldInit();
+ if (v.a == 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntTwoFieldsCall1()
+ {
+ int res = 0;
+ var v = TestNativeIntTwoFieldsReturnBlockInit();
+ if (v.a == 0)
+ {
+ res++;
+ }
+ if (v.b == 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntTwoFieldsCall2()
+ {
+ int res = 0;
+ var v = TestNativeIntTwoFieldsReturnFieldInit();
+ if (v.a == 0)
+ {
+ res++;
+ }
+ if (v.b == 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntFloatFieldCall1()
+ {
+ int res = 0;
+ var v = TestNativeIntFloatFieldReturnBlockInit();
+ if (v.a >= 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntFloatFieldCall2()
+ {
+ int res = 0;
+ var v = TestNativeIntFloatFieldReturnFieldInit();
+ if (v.a >= 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntMixedFieldsCall1()
+ {
+ int res = 0;
+ var v = TestNativeIntMixedFieldsReturnBlockInit();
+ if (v.a == 0)
+ {
+ res++;
+ }
+ if (v.b == 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static int TestNativeIntMixedFieldsCall2()
+ {
+ int res = 0;
+ var v = TestNativeIntMixedFieldsReturnFieldInit();
+ if (v.a == 0)
+ {
+ res++;
+ }
+ if (v.b == 0)
+ {
+ res++;
+ }
+ return res;
+ }
+
+ public static void Test()
+ {
+ TestLessNativeIntCall1();
+ TestLessNativeIntCall2();
+ TestNativeIntOneFieldCall1();
+ TestNativeIntOneFieldCall2();
+ TestNativeIntTwoFieldsCall1();
+ TestNativeIntTwoFieldsCall2();
+ TestNativeIntFloatFieldCall1();
+ TestNativeIntFloatFieldCall2();
+ TestNativeIntMixedFieldsCall1();
+ TestNativeIntMixedFieldsCall2();
+ }
+}
+#endregion
+
+#region Test struct unsafe casts
+class TestUnsafeCasts
+{
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromLPrim1()
+ {
+ long[] l = new long[1] { 1 };
+ ref int r = ref Unsafe.As<long, int>(ref l[0]);
+ Debug.Assert(l[0] != 2);
+ r = 2;
+ Debug.Assert(l[0] == 2);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromLPrim2()
+ {
+ long[] l = new long[1] { 1 };
+ ref float r = ref Unsafe.As<long, float>(ref l[0]);
+ Debug.Assert(l[0] != 0);
+ r = 0;
+ Debug.Assert(l[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromLPrim3()
+ {
+ double[] d = new double[1] { 154345345 };
+ ref float r = ref Unsafe.As<double, float>(ref d[0]);
+ Debug.Assert(d[0] != 0);
+ r = 0;
+ Debug.Assert(d[0] == 154345344);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromLPrim4()
+ {
+ double[] d = new double[1] { 154345345 };
+ ref int r = ref Unsafe.As<double, int>(ref d[0]);
+ Debug.Assert(d[0] != 0);
+ r = 0;
+ Debug.Assert(d[0] == 154345344);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromLPrim5()
+ {
+ long l = 0x123412341234;
+ int r = Unsafe.As<long, int>(ref l);
+ Debug.Assert(r == 0x12341234);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromLPrim6()
+ {
+ long l = 0;
+ float r = Unsafe.As<long, float>(ref l);
+ Debug.Assert(r == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSPrim1()
+ {
+ int[] i = new int[2] { 0x12341234, 0 };
+ ref long r = ref Unsafe.As<int, long>(ref i[0]);
+ Debug.Assert(r == 0x12341234);
+ r = 0;
+ Debug.Assert(i[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSPrim2()
+ {
+ int[] i = new int[2] { 1, 2 };
+ ref double r = ref Unsafe.As<int, double>(ref i[0]);
+ Debug.Assert(i[0] != 0);
+ r = 0;
+ Debug.Assert(i[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSPrim3()
+ {
+ float[] f = new float[2] { 1, 2 };
+ ref double r = ref Unsafe.As<float, double>(ref f[0]);
+ Debug.Assert(f[0] != 0);
+ r = 0;
+ Debug.Assert(f[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSPrim4()
+ {
+ float[] f = new float[2] { 1, 2 };
+ ref long r = ref Unsafe.As<float, long>(ref f[0]);
+ Debug.Assert(f[0] != 0);
+ r = 0;
+ Debug.Assert(f[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSPrim5()
+ {
+ int l = 0x12341234;
+ long r = Unsafe.As<int, long>(ref l);
+ Debug.Assert((uint)(r % (UInt32.MaxValue + 1L)) == 0x12341234);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSPrim6()
+ {
+ int l = 5;
+ double r = Unsafe.As<int, double>(ref l);
+ l = Unsafe.As<double, int>(ref r);
+ Debug.Assert(l == 5);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim1()
+ {
+ float[] f = new float[1] { 1 };
+ ref int r = ref Unsafe.As<float, int>(ref f[0]);
+ Debug.Assert(f[0] != 0);
+ r = 0;
+ Debug.Assert(f[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim2()
+ {
+ int[] i = new int[1] { 1 };
+ ref float r = ref Unsafe.As<int, float>(ref i[0]);
+ Debug.Assert(i[0] != 0);
+ r = 0;
+ Debug.Assert(i[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim3()
+ {
+ short[] f = new short[1] { 1 };
+ ref ushort r = ref Unsafe.As<short, ushort>(ref f[0]);
+ Debug.Assert(f[0] != 0);
+ r = 0;
+ Debug.Assert(f[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim4()
+ {
+ int[] i = new int[1] { 1 };
+ ref uint r = ref Unsafe.As<int, uint>(ref i[0]);
+ Debug.Assert(i[0] != 0);
+ r = 0;
+ Debug.Assert(i[0] == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim5()
+ {
+ double l = 0x12341234;
+ long r = Unsafe.As<double, long>(ref l);
+ Debug.Assert(r == 0x41b2341234000000);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim6()
+ {
+ float l = 0x12341234;
+ int r = Unsafe.As<float, int>(ref l);
+ Debug.Assert(r == 0x4d91a092);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim7()
+ {
+ long l = 0;
+ double r = Unsafe.As<long, double>(ref l);
+ Debug.Assert(r == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim8()
+ {
+ int l = 0;
+ float r = Unsafe.As<int, float>(ref l);
+ Debug.Assert(r == 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim9()
+ {
+ short l = 100;
+ ushort r = Unsafe.As<short, ushort>(ref l);
+ Debug.Assert(r == 100);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromLargePrim()
+ {
+ PrimFromLPrim1();
+ PrimFromLPrim2();
+ PrimFromLPrim3();
+ PrimFromLPrim4();
+ PrimFromLPrim5();
+ PrimFromLPrim6();
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSmallPrim()
+ {
+ PrimFromSPrim1();
+ PrimFromSPrim2();
+ PrimFromSPrim3();
+ PrimFromSPrim4();
+ PrimFromSPrim5();
+ PrimFromSPrim6();
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void PrimFromSameSizePrim()
+ {
+ PrimFromSameSizePrim1();
+ PrimFromSameSizePrim2();
+ PrimFromSameSizePrim3();
+ PrimFromSameSizePrim4();
+ PrimFromSameSizePrim5();
+ PrimFromSameSizePrim6();
+ PrimFromSameSizePrim7();
+ PrimFromSameSizePrim8();
+ PrimFromSameSizePrim9();
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public static void TestPrimitiveCasts()
+ {
+ PrimFromLargePrim();
+ PrimFromSmallPrim();
+ PrimFromSameSizePrim();
+ }
+
+ struct smallStruct
+ {
+ public bool a;
+ public bool b;
+ }
+
+ struct nativeStruct
+ {
+ public IntPtr a;
+ }
+
+ struct largeStruct
+ {
+ public int a;
+ public long b;
+ public double c;
+ public bool d;
+ public float e;
+ }
+
+ struct eightByteStruct
+ {
+ public long a;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void PrimFromSStruct()
+ {
+ smallStruct[] s = new smallStruct[2];
+ s[0].a = true;
+ s[0].b = false;
+ int v = Unsafe.As<smallStruct, int>(ref s[0]);
+ Debug.Assert(v != 0);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void PrimFromLStruct()
+ {
+ largeStruct s;
+ s.a = 1;
+ s.b = 2;
+ s.c = 3.0;
+ s.d = false;
+ s.e = 1;
+ int v = Unsafe.As<largeStruct, int>(ref s);
+ Debug.Assert(v == 1);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void PrimFromStruct()
+ {
+ nativeStruct s;
+ s.a = new IntPtr(100);
+ long v = Unsafe.As<nativeStruct, long>(ref s);
+ s = Unsafe.As<long, nativeStruct>(ref v);
+ Debug.Assert(s.a.ToInt32() == 100);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromSPrim()
+ {
+ byte[] v = new byte[8] { 1, 0, 0, 0, 0, 0, 0, 0 };
+ smallStruct s = Unsafe.As<byte, smallStruct>(ref v[0]);
+ Debug.Assert(s.a == true && s.b == false);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromLPrim()
+ {
+ int v = 0b1;
+ smallStruct s = Unsafe.As<int, smallStruct>(ref v);
+ Debug.Assert(s.a == true && s.b == false);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromPrim()
+ {
+ long v = 100;
+ nativeStruct s = Unsafe.As<long, nativeStruct>(ref v);
+ Debug.Assert(s.a.ToInt32() == 100);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromSStruct1()
+ {
+ smallStruct[] smallS = new smallStruct[4];
+ smallS[0].a = true;
+ smallS[0].b = false;
+ eightByteStruct largeS = Unsafe.As<smallStruct, eightByteStruct>(ref smallS[0]);
+ Debug.Assert((uint)(largeS.a % (UInt32.MaxValue + 1L)) == 1);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromSStruct2()
+ {
+ smallStruct[] smallS = new smallStruct[8];
+ smallS[0].a = true;
+ smallS[0].b = false;
+ largeStruct largeS = Unsafe.As<smallStruct, largeStruct>(ref smallS[0]);
+ Debug.Assert((uint)(largeS.a % (UInt32.MaxValue + 1L)) == 1);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromSStruct3()
+ {
+ eightByteStruct[] smallS = new eightByteStruct[2];
+ smallS[0].a = 1000;
+ largeStruct largeS = Unsafe.As<eightByteStruct, largeStruct>(ref smallS[0]);
+ Debug.Assert(largeS.a == 1000);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromLStruct1()
+ {
+ eightByteStruct largeS;
+ largeS.a = 1;
+ smallStruct smallS = Unsafe.As<eightByteStruct, smallStruct>(ref largeS);
+ Debug.Assert(smallS.a == true && smallS.b == false);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromLStruct2()
+ {
+ largeStruct largeS;
+ largeS.a = 1;
+ largeS.b = 2;
+ largeS.c = 3.0;
+ largeS.d = false;
+ largeS.e = 1;
+ smallStruct smallS = Unsafe.As<largeStruct, smallStruct>(ref largeS);
+ Debug.Assert(smallS.a == true && smallS.b == false);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromLStruct3()
+ {
+ largeStruct largeS;
+ largeS.a = 3;
+ largeS.b = 2;
+ largeS.c = 3.0;
+ largeS.d = false;
+ largeS.e = 1;
+ eightByteStruct smallS = Unsafe.As<largeStruct, eightByteStruct>(ref largeS);
+ Debug.Assert(smallS.a == 3);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static unsafe void StructFromStruct()
+ {
+ eightByteStruct s1;
+ s1.a = 3;
+ eightByteStruct s2 = Unsafe.As<eightByteStruct, eightByteStruct>(ref s1);
+ Debug.Assert(s2.a == 3);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void TestStructCasts()
+ {
+ PrimFromSStruct();
+ PrimFromLStruct();
+ PrimFromStruct();
+
+ StructFromSPrim();
+ StructFromLPrim();
+ StructFromPrim();
+
+ StructFromSStruct1();
+ StructFromSStruct2();
+ StructFromSStruct3();
+
+ StructFromLStruct1();
+ StructFromLStruct2();
+ StructFromLStruct3();
+
+ StructFromStruct();
+ }
+
+ #region for the tests below we are expecting only one move instruction to be generated for each.
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static long ReturnAsLong(eightByteStruct a)
+ {
+ return Unsafe.As<eightByteStruct, long>(ref a);
+ }
+
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static eightByteStruct ReturnAsEightByteStructFromLong(long a)
+ {
+ return Unsafe.As<long, eightByteStruct>(ref a);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static double ReturnAsDouble(eightByteStruct a)
+ {
+ return Unsafe.As<eightByteStruct, double>(ref a);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static eightByteStruct ReturnAsEightByteStructFromDouble(double a)
+ {
+ return Unsafe.As<double, eightByteStruct>(ref a);
+ }
+
+ struct eightByteStructOverDouble
+ {
+ public double a;
+ }
+
+ static long ReturnAsLong(eightByteStructOverDouble a)
+ {
+ return Unsafe.As<eightByteStructOverDouble, long>(ref a);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static eightByteStructOverDouble ReturnAsEightByteStructOverDoubleFromLong(long a)
+ {
+ return Unsafe.As<long, eightByteStructOverDouble>(ref a);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static double ReturnAsDouble(eightByteStructOverDouble a)
+ {
+ return Unsafe.As<eightByteStructOverDouble, double>(ref a);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static eightByteStructOverDouble ReturnAsEightByteStructOverDoubleFromDouble(double a)
+ {
+ return Unsafe.As<double, eightByteStructOverDouble>(ref a);
+ }
+
+ #endregion
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void TestCastSameSize()
+ {
+ eightByteStruct e;
+ e.a = 32;
+ long l = ReturnAsLong(e);
+ Debug.Assert(l == 32);
+ e = ReturnAsEightByteStructFromLong(l);
+ Debug.Assert(e.a == 32);
+ double d = ReturnAsDouble(e);
+ e = ReturnAsEightByteStructFromDouble(d);
+ Debug.Assert(e.a == 32);
+
+ eightByteStructOverDouble ed;
+ ed = ReturnAsEightByteStructOverDoubleFromLong(l);
+ l = ReturnAsLong(ed);
+ Debug.Assert(l == 32);
+ d = ReturnAsDouble(ed);
+ ed = ReturnAsEightByteStructOverDoubleFromDouble(d);
+ l = ReturnAsLong(ed);
+ Debug.Assert(e.a == 32);
+ }
+
+ struct StructWithVectorField
+ {
+ public int a;
+ public Vector<float> b;
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void TestSIMDInit()
+ {
+ Vector<float> localVector = new Vector<float>();
+ StructWithVectorField structWithVectorField;
+ structWithVectorField.a = 0;
+ structWithVectorField.b = new Vector<float>();
+ }
+
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void TestWhatShouldBeOptimized()
+ {
+ TestCastSameSize();
+ TestSIMDInit();
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public static void Test()
+ {
+ TestPrimitiveCasts();
+ TestStructCasts();
+ TestWhatShouldBeOptimized();
+ }
+}
+
+#endregion
+
+#region Test merge return blocks
+class TestMergeReturnBlocks
+{
+ struct ReturnStruct
+ {
+ public float a;
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public ReturnStruct(int a)
+ {
+ this.a = a;
+ }
+ }
+
+ static ReturnStruct TestConstPropogation(int a)
+ {
+ if (a == 0)
+ {
+ ReturnStruct s = new ReturnStruct(); // ASG(s, 0);
+ return s;
+ }
+ else if (a == 1)
+ {
+ ReturnStruct s = new ReturnStruct(1);
+ return s;
+ }
+ else if (a == 2)
+ {
+ ReturnStruct s;
+ s.a = 2;
+ return s;
+ }
+ else if (a == 3)
+ {
+ ReturnStruct s = new ReturnStruct(3);
+ ReturnStruct s2 = s;
+ ReturnStruct s3 = s2;
+ return s3;
+ }
+ return new ReturnStruct(4);
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void TestConstPropogation()
+ {
+ TestConstPropogation(5);
+ }
+
+
+ [StructLayout(LayoutKind.Explicit, Pack = 1)]
+ struct StructWithOverlaps
+ {
+ [FieldOffset(0)]
+ public int val;
+ [FieldOffset(0)]
+ public ReturnStruct s;
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ public StructWithOverlaps(int v)
+ {
+ val = 0;
+ s.a = v;
+ }
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static ReturnStruct TestNoFieldSeqPropogation(int a)
+ {
+ StructWithOverlaps s = new StructWithOverlaps();
+ if (a == 0)
+ {
+ return new ReturnStruct();
+ }
+ else if (a == 1)
+ {
+ return s.s;
+ }
+ else if (a == 2)
+ {
+ StructWithOverlaps s2 = new StructWithOverlaps(2);
+ return s2.s;
+ }
+ else if (a == 3)
+ {
+ StructWithOverlaps s3 = new StructWithOverlaps(3);
+ return s3.s;
+ }
+ else
+ {
+ StructWithOverlaps s4 = new StructWithOverlaps(4);
+ return s4.s;
+ }
+
+ }
+
+ [MethodImpl(MethodImplOptions.NoInlining)]
+ static void TestNoFieldSeqPropogation()
+ {
+ TestNoFieldSeqPropogation(5);
+ }
+
+
+ public static void Test()
+ {
+ TestConstPropogation();
+ TestNoFieldSeqPropogation();
+ }
+}
+#endregion
+
+class TestStructs
+{
+ public static int Main()
+ {
+ TestStructReturns.Test();
+ TestUnsafeCasts.Test();
+ TestMergeReturnBlocks.Test();
+ return 100;
+ }
+}
diff --git a/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.csproj b/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.csproj
new file mode 100644
index 0000000000000..d9516b03e4a5d
--- /dev/null
+++ b/src/coreclr/tests/src/JIT/Directed/StructABI/structreturn.csproj
@@ -0,0 +1,14 @@
+<Project Sdk="Microsoft.NET.Sdk">
+  <PropertyGroup>
+    <OutputType>Exe</OutputType>
+    <CLRTestPriority>1</CLRTestPriority>
+  </PropertyGroup>
+  <PropertyGroup>
+    <DebugType>PdbOnly</DebugType>
+    <Optimize>True</Optimize>
+    <AllowUnsafeBlocks>True</AllowUnsafeBlocks>
+  </PropertyGroup>
+  <ItemGroup>
+    <Compile Include="structreturn.cs" />
+  </ItemGroup>
+</Project>