diff --git a/eng/intellisense.targets b/eng/intellisense.targets
index a4ffedf6d790c..fe4c60862798b 100644
--- a/eng/intellisense.targets
+++ b/eng/intellisense.targets
@@ -13,12 +13,18 @@
- $(NoWarn);1591
$([MSBuild]::NormalizeDirectory('$(NuGetPackageRoot)', 'microsoft.private.intellisense', '$(MicrosoftPrivateIntellisenseVersion)', 'IntellisenseFiles'))
$([MSBuild]::NormalizePath('$(IntellisensePackageXmlRootFolder)', 'net', '1033', '$(AssemblyName).xml'))
$([MSBuild]::NormalizePath('$(IntellisensePackageXmlRootFolder)', 'dotnet-plat-ext', '1033', '$(AssemblyName).xml'))
$(IntellisensePackageXmlFilePathFromNetFolder)
$(IntellisensePackageXmlFilePathFromDotNetPlatExtFolder)
+
+ $(NoWarn);1591
diff --git a/src/coreclr/inc/corinfo.h b/src/coreclr/inc/corinfo.h
index 04a8f2fb898d4..83d007e5a436b 100644
--- a/src/coreclr/inc/corinfo.h
+++ b/src/coreclr/inc/corinfo.h
@@ -1761,7 +1761,6 @@ enum CORINFO_OS
enum CORINFO_RUNTIME_ABI
{
- CORINFO_DESKTOP_ABI = 0x100,
CORINFO_CORECLR_ABI = 0x200,
CORINFO_NATIVEAOT_ABI = 0x300,
};
@@ -2027,7 +2026,7 @@ class ICorStaticInfo
virtual void getMethodSig (
CORINFO_METHOD_HANDLE ftn, /* IN */
CORINFO_SIG_INFO *sig, /* OUT */
- CORINFO_CLASS_HANDLE memberParent = NULL /* IN */
+ CORINFO_CLASS_HANDLE memberParent = NULL /* IN */
) = 0;
/*********************************************************************
@@ -2063,9 +2062,9 @@ class ICorStaticInfo
// inlining results when it returns INLINE_FAIL and INLINE_NEVER. All other results are reported by the
// JIT.
virtual void reportInliningDecision (CORINFO_METHOD_HANDLE inlinerHnd,
- CORINFO_METHOD_HANDLE inlineeHnd,
- CorInfoInline inlineResult,
- const char * reason) = 0;
+ CORINFO_METHOD_HANDLE inlineeHnd,
+ CorInfoInline inlineResult,
+ const char * reason) = 0;
// Returns false if the call is across security boundaries thus we cannot tailcall
@@ -2075,23 +2074,23 @@ class ICorStaticInfo
CORINFO_METHOD_HANDLE callerHnd, /* IN */
CORINFO_METHOD_HANDLE declaredCalleeHnd, /* IN */
CORINFO_METHOD_HANDLE exactCalleeHnd, /* IN */
- bool fIsTailPrefix /* IN */
+ bool fIsTailPrefix /* IN */
) = 0;
// Reports whether or not a method can be tail called, and why.
// canTailCall is responsible for reporting all results when it returns
// false. All other results are reported by the JIT.
virtual void reportTailCallDecision (CORINFO_METHOD_HANDLE callerHnd,
- CORINFO_METHOD_HANDLE calleeHnd,
- bool fIsTailPrefix,
- CorInfoTailCall tailCallResult,
- const char * reason) = 0;
+ CORINFO_METHOD_HANDLE calleeHnd,
+ bool fIsTailPrefix,
+ CorInfoTailCall tailCallResult,
+ const char * reason) = 0;
// get individual exception handler
virtual void getEHinfo(
CORINFO_METHOD_HANDLE ftn, /* IN */
- unsigned EHnumber, /* IN */
- CORINFO_EH_CLAUSE* clause /* OUT */
+ unsigned EHnumber, /* IN */
+ CORINFO_EH_CLAUSE* clause /* OUT */
) = 0;
// return class it belongs to
@@ -2099,11 +2098,6 @@ class ICorStaticInfo
CORINFO_METHOD_HANDLE method
) = 0;
- // return module it belongs to
- virtual CORINFO_MODULE_HANDLE getMethodModule (
- CORINFO_METHOD_HANDLE method
- ) = 0;
-
// This function returns the offset of the specified method in the
// vtable of it's owning class or interface.
virtual void getMethodVTableOffset (
@@ -2123,7 +2117,7 @@ class ICorStaticInfo
// Get the unboxed entry point for a method, if possible.
virtual CORINFO_METHOD_HANDLE getUnboxedEntry(
CORINFO_METHOD_HANDLE ftn,
- bool* requiresInstMethodTableArg
+ bool* requiresInstMethodTableArg
) = 0;
// Given T, return the type of the default Comparer.
@@ -2172,23 +2166,11 @@ class ICorStaticInfo
) = 0;
// Check constraints on method type arguments (only).
- // The parent class should be checked separately using satisfiesClassConstraints(parent).
virtual bool satisfiesMethodConstraints(
CORINFO_CLASS_HANDLE parent, // the exact parent of the method
CORINFO_METHOD_HANDLE method
) = 0;
- // Given a delegate target class, a target method parent class, a target method,
- // a delegate class, check if the method signature is compatible with the Invoke method of the delegate
- // (under the typical instantiation of any free type variables in the memberref signatures).
- virtual bool isCompatibleDelegate(
- CORINFO_CLASS_HANDLE objCls, /* type of the delegate target, if any */
- CORINFO_CLASS_HANDLE methodParentCls, /* exact parent of the target method, if any */
- CORINFO_METHOD_HANDLE method, /* (representative) target method, if any */
- CORINFO_CLASS_HANDLE delegateCls, /* exact type of the delegate */
- bool *pfIsOpenDelegate /* is the delegate open */
- ) = 0;
-
// load and restore the method
virtual void methodMustBeLoadedBeforeCodeIsRun(
CORINFO_METHOD_HANDLE method
@@ -2201,7 +2183,7 @@ class ICorStaticInfo
// Returns the global cookie for the /GS unsafe buffer checks
// The cookie might be a constant value (JIT), or a handle to memory location (Ngen)
virtual void getGSCookie(
- GSCookie * pCookieVal, // OUT
+ GSCookie * pCookieVal, // OUT
GSCookie ** ppCookieVal // OUT
) = 0;
@@ -2226,12 +2208,6 @@ class ICorStaticInfo
// failures during token resolution.
virtual void resolveToken(/* IN, OUT */ CORINFO_RESOLVED_TOKEN * pResolvedToken) = 0;
- // Attempt to resolve a metadata token into a runtime method handle. Returns true
- // if resolution succeeded and false otherwise (e.g. if it encounters invalid metadata
- // during token reoslution). This method should be used instead of `resolveToken` in
- // situations that need to be resilient to invalid metadata.
- virtual bool tryResolveToken(/* IN, OUT */ CORINFO_RESOLVED_TOKEN * pResolvedToken) = 0;
-
// Signature information about the call sig
virtual void findSig (
CORINFO_MODULE_HANDLE module, /* IN */
@@ -2253,18 +2229,6 @@ class ICorStaticInfo
virtual CORINFO_CLASS_HANDLE getTokenTypeAsHandle (
CORINFO_RESOLVED_TOKEN * pResolvedToken /* IN */) = 0;
- // Checks if the given metadata token is valid
- virtual bool isValidToken (
- CORINFO_MODULE_HANDLE module, /* IN */
- unsigned metaTOK /* IN */
- ) = 0;
-
- // Checks if the given metadata token is valid StringRef
- virtual bool isValidStringRef (
- CORINFO_MODULE_HANDLE module, /* IN */
- unsigned metaTOK /* IN */
- ) = 0;
-
// Returns (sub)string length and content (can be null for dynamic context)
// for given metaTOK and module, length `-1` means input is incorrect
virtual int getStringLiteral (
@@ -2380,8 +2344,8 @@ class ICorStaticInfo
virtual size_t getClassModuleIdForStatics (
CORINFO_CLASS_HANDLE cls,
- CORINFO_MODULE_HANDLE *pModule,
- void **ppIndirection
+ CORINFO_MODULE_HANDLE * pModule,
+ void ** ppIndirection
) = 0;
virtual bool getIsClassInitedFlagAddress(
@@ -2398,17 +2362,17 @@ class ICorStaticInfo
// return the number of bytes needed by an instance of the class
virtual unsigned getClassSize (
- CORINFO_CLASS_HANDLE cls
+ CORINFO_CLASS_HANDLE cls
) = 0;
// return the number of bytes needed by an instance of the class allocated on the heap
virtual unsigned getHeapClassSize(
- CORINFO_CLASS_HANDLE cls
- ) = 0;
+ CORINFO_CLASS_HANDLE cls
+ ) = 0;
virtual bool canAllocateOnStack(
- CORINFO_CLASS_HANDLE cls
- ) = 0;
+ CORINFO_CLASS_HANDLE cls
+ ) = 0;
virtual unsigned getClassAlignmentRequirement (
CORINFO_CLASS_HANDLE cls,
@@ -2435,21 +2399,21 @@ class ICorStaticInfo
) = 0;
virtual CORINFO_FIELD_HANDLE getFieldInClass(
- CORINFO_CLASS_HANDLE clsHnd,
- int32_t num
+ CORINFO_CLASS_HANDLE clsHnd,
+ int32_t num
) = 0;
virtual bool checkMethodModifier(
- CORINFO_METHOD_HANDLE hMethod,
- const char * modifier,
- bool fOptional
+ CORINFO_METHOD_HANDLE hMethod,
+ const char * modifier,
+ bool fOptional
) = 0;
// returns the "NEW" helper optimized for "newCls."
virtual CorInfoHelpFunc getNewHelper(
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_METHOD_HANDLE callerHandle,
- bool * pHasSideEffects
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ bool * pHasSideEffects
) = 0;
// returns the newArr (1-Dim array) helper optimized for "arrayCls."
@@ -2459,17 +2423,17 @@ class ICorStaticInfo
// returns the optimized "IsInstanceOf" or "ChkCast" helper
virtual CorInfoHelpFunc getCastingHelper(
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- bool fThrowing
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ bool fThrowing
) = 0;
// returns helper to trigger static constructor
virtual CorInfoHelpFunc getSharedCCtorHelper(
- CORINFO_CLASS_HANDLE clsHnd
+ CORINFO_CLASS_HANDLE clsHnd
) = 0;
// Boxing nullable actually returns a boxed not a boxed Nullable.
- virtual CORINFO_CLASS_HANDLE getTypeForBox(
+ virtual CORINFO_CLASS_HANDLE getTypeForBox(
CORINFO_CLASS_HANDLE cls
) = 0;
@@ -2528,7 +2492,8 @@ class ICorStaticInfo
virtual bool getStringChar(
CORINFO_OBJECT_HANDLE strObj,
int index,
- uint16_t* value) = 0;
+ uint16_t* value
+ ) = 0;
//------------------------------------------------------------------------------
// getObjectType: obtains type handle for given object
@@ -2551,10 +2516,10 @@ class ICorStaticInfo
) = 0;
virtual void getReadyToRunDelegateCtorHelper(
- CORINFO_RESOLVED_TOKEN * pTargetMethod,
- mdToken targetConstraint,
- CORINFO_CLASS_HANDLE delegateType,
- CORINFO_LOOKUP * pLookup
+ CORINFO_RESOLVED_TOKEN * pTargetMethod,
+ mdToken targetConstraint,
+ CORINFO_CLASS_HANDLE delegateType,
+ CORINFO_LOOKUP * pLookup
) = 0;
// This function tries to initialize the class (run the class constructor).
@@ -2600,7 +2565,7 @@ class ICorStaticInfo
CORINFO_CLASS_HANDLE cls
) = 0;
- // TRUE if child is a subtype of parent
+ // `true` if child is a subtype of parent
// if parent is an interface, then does child implement / extend parent
virtual bool canCast(
CORINFO_CLASS_HANDLE child, // subtype (extends parent)
@@ -2621,12 +2586,6 @@ class ICorStaticInfo
CORINFO_CLASS_HANDLE cls2
) = 0;
- // Returns the intersection of cls1 and cls2.
- virtual CORINFO_CLASS_HANDLE mergeClasses(
- CORINFO_CLASS_HANDLE cls1,
- CORINFO_CLASS_HANDLE cls2
- ) = 0;
-
// Returns true if cls2 is known to be a more specific type
// than cls1 (a subtype or more restrictive shared type)
// for purposes of jit type tracking. This is a hint to the
@@ -2659,13 +2618,8 @@ class ICorStaticInfo
// Given an Array of Type Foo, returns Foo.
// Given BYREF Foo, returns Foo
virtual CorInfoType getChildType (
- CORINFO_CLASS_HANDLE clsHnd,
- CORINFO_CLASS_HANDLE *clsRet
- ) = 0;
-
- // Check constraints on type arguments of this class and parent classes
- virtual bool satisfiesClassConstraints(
- CORINFO_CLASS_HANDLE cls
+ CORINFO_CLASS_HANDLE clsHnd,
+ CORINFO_CLASS_HANDLE* clsRet
) = 0;
// Check if this is a single dimensional array type
@@ -2673,14 +2627,14 @@ class ICorStaticInfo
CORINFO_CLASS_HANDLE cls
) = 0;
- // Get the numbmer of dimensions in an array
+ // Get the number of dimensions in an array
virtual unsigned getArrayRank(
CORINFO_CLASS_HANDLE cls
) = 0;
// Get the index of runtime provided array method
virtual CorInfoArrayIntrinsic getArrayIntrinsicID(
- CORINFO_METHOD_HANDLE ftn
+ CORINFO_METHOD_HANDLE ftn
) = 0;
// Get static field data for an array
@@ -2691,11 +2645,11 @@ class ICorStaticInfo
// Check Visibility rules.
virtual CorInfoIsAccessAllowedResult canAccessClass(
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_METHOD_HANDLE callerHandle,
- CORINFO_HELPER_DESC *pAccessHelper /* If canAccessMethod returns something other
- than ALLOWED, then this is filled in. */
- ) = 0;
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_HELPER_DESC * pAccessHelper /* If canAccessMethod returns something other
+ than ALLOWED, then this is filled in. */
+ ) = 0;
/**********************************************************************************/
//
@@ -2723,29 +2677,34 @@ class ICorStaticInfo
// 'memberParent' is typically only set when verifying. It should be the
// result of calling getMemberParent.
virtual CorInfoType getFieldType(
- CORINFO_FIELD_HANDLE field,
- CORINFO_CLASS_HANDLE *structType = NULL,
- CORINFO_CLASS_HANDLE memberParent = NULL /* IN */
- ) = 0;
+ CORINFO_FIELD_HANDLE field,
+ CORINFO_CLASS_HANDLE * structType = NULL,
+ CORINFO_CLASS_HANDLE memberParent = NULL /* IN */
+ ) = 0;
// return the data member's instance offset
virtual unsigned getFieldOffset(
- CORINFO_FIELD_HANDLE field
- ) = 0;
+ CORINFO_FIELD_HANDLE field
+ ) = 0;
- virtual void getFieldInfo (CORINFO_RESOLVED_TOKEN * pResolvedToken,
- CORINFO_METHOD_HANDLE callerHandle,
- CORINFO_ACCESS_FLAGS flags,
- CORINFO_FIELD_INFO *pResult
- ) = 0;
+ virtual void getFieldInfo(
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ CORINFO_METHOD_HANDLE callerHandle,
+ CORINFO_ACCESS_FLAGS flags,
+ CORINFO_FIELD_INFO * pResult
+ ) = 0;
// Returns the index against which the field's thread static block in stored in TLS.
virtual uint32_t getThreadLocalFieldInfo (
- CORINFO_FIELD_HANDLE field, bool isGCType) = 0;
+ CORINFO_FIELD_HANDLE field,
+ bool isGCType
+ ) = 0;
// Returns the thread static block information like offsets, etc. from current TLS.
virtual void getThreadLocalStaticBlocksInfo (
- CORINFO_THREAD_STATIC_BLOCKS_INFO* pInfo, bool isGCType) = 0;
+ CORINFO_THREAD_STATIC_BLOCKS_INFO* pInfo,
+ bool isGCType
+ ) = 0;
// Returns true iff "fldHnd" represents a static field.
virtual bool isFieldStatic(CORINFO_FIELD_HANDLE fldHnd) = 0;
@@ -2768,12 +2727,12 @@ class ICorStaticInfo
// be used only as a hint and the native compiler should not change its
// code generation.
virtual void getBoundaries(
- CORINFO_METHOD_HANDLE ftn, // [IN] method of interest
- unsigned int *cILOffsets, // [OUT] size of pILOffsets
- uint32_t **pILOffsets, // [OUT] IL offsets of interest
- // jit MUST free with freeArray!
- ICorDebugInfo::BoundaryTypes *implicitBoundaries // [OUT] tell jit, all boundaries of this type
- ) = 0;
+ CORINFO_METHOD_HANDLE ftn, // [IN] method of interest
+ unsigned int *cILOffsets, // [OUT] size of pILOffsets
+ uint32_t **pILOffsets, // [OUT] IL offsets of interest
+ // jit MUST free with freeArray!
+ ICorDebugInfo::BoundaryTypes *implicitBoundaries // [OUT] tell jit, all boundaries of this type
+ ) = 0;
// Report back the mapping from IL to native code,
// this map should include all boundaries that 'getBoundaries'
@@ -2783,11 +2742,11 @@ class ICorStaticInfo
// offsets form a contiguous block of memory, and that the
// OffsetMapping is sorted in order of increasing native offset.
virtual void setBoundaries(
- CORINFO_METHOD_HANDLE ftn, // [IN] method of interest
- uint32_t cMap, // [IN] size of pMap
- ICorDebugInfo::OffsetMapping *pMap // [IN] map including all points of interest.
- // jit allocated with allocateArray, EE frees
- ) = 0;
+ CORINFO_METHOD_HANDLE ftn, // [IN] method of interest
+ uint32_t cMap, // [IN] size of pMap
+ ICorDebugInfo::OffsetMapping *pMap // [IN] map including all points of interest.
+ // jit allocated with allocateArray, EE frees
+ ) = 0;
// Query the EE to find out the scope of local variables.
// normally the JIT would trash variables after last use, but
@@ -2800,16 +2759,15 @@ class ICorStaticInfo
virtual void getVars(
CORINFO_METHOD_HANDLE ftn, // [IN] method of interest
uint32_t *cVars, // [OUT] size of 'vars'
- ICorDebugInfo::ILVarInfo **vars, // [OUT] scopes of variables of interest
+ ICorDebugInfo::ILVarInfo **vars, // [OUT] scopes of variables of interest
// jit MUST free with freeArray!
- bool *extendOthers // [OUT] it TRUE, then assume the scope
+ bool *extendOthers // [OUT] if `true`, then assume the scope
// of unmentioned vars is entire method
) = 0;
// Report back to the EE the location of every variable.
// note that the JIT might split lifetimes into different
// locations etc.
-
virtual void setVars(
CORINFO_METHOD_HANDLE ftn, // [IN] method of interest
uint32_t cVars, // [IN] size of 'vars'
@@ -2833,15 +2791,15 @@ class ICorStaticInfo
// For eg, use this to allocated memory for reporting debug info,
// which will be handed to the EE by setVars() and setBoundaries()
virtual void * allocateArray(
- size_t cBytes
- ) = 0;
+ size_t cBytes
+ ) = 0;
// JitCompiler will free arrays passed by the EE using this
// For eg, The EE returns memory in getVars() and getBoundaries()
// to the JitCompiler, which the JitCompiler should release using
// freeArray()
virtual void freeArray(
- void *array
+ void * array
) = 0;
/*********************************************************************************/
@@ -2868,17 +2826,17 @@ class ICorStaticInfo
virtual CorInfoTypeWithMod getArgType (
CORINFO_SIG_INFO* sig, /* IN */
CORINFO_ARG_LIST_HANDLE args, /* IN */
- CORINFO_CLASS_HANDLE *vcTypeRet /* OUT */
+ CORINFO_CLASS_HANDLE* vcTypeRet /* OUT */
) = 0;
// Obtains a list of exact classes for a given base type. Returns 0 if the number of
// the exact classes is greater than maxExactClasses or if more types might be loaded
// in future.
virtual int getExactClasses(
- CORINFO_CLASS_HANDLE baseType, /* IN */
- int maxExactClasses, /* IN */
- CORINFO_CLASS_HANDLE* exactClsRet /* OUT */
- ) = 0;
+ CORINFO_CLASS_HANDLE baseType, /* IN */
+ int maxExactClasses, /* IN */
+ CORINFO_CLASS_HANDLE* exactClsRet /* OUT */
+ ) = 0;
// If the Arg is a CORINFO_TYPE_CLASS fetch the class handle associated with it
virtual CORINFO_CLASS_HANDLE getArgClass (
@@ -2888,7 +2846,7 @@ class ICorStaticInfo
// Returns type of HFA for valuetype
virtual CorInfoHFAElemType getHFAType (
- CORINFO_CLASS_HANDLE hClass
+ CORINFO_CLASS_HANDLE hClass
) = 0;
// Runs the given function under an error trap. This allows the JIT to make calls
@@ -2897,18 +2855,18 @@ class ICorStaticInfo
// successfully and false otherwise.
typedef void (*errorTrapFunction)(void*);
virtual bool runWithErrorTrap(
- errorTrapFunction function, // The function to run
- void* parameter // The context parameter that will be passed to the function and the handler
- ) = 0;
+ errorTrapFunction function, // The function to run
+ void* parameter // The context parameter that will be passed to the function and the handler
+ ) = 0;
// Runs the given function under an error trap. This allows the JIT to make calls
// to interface functions that may throw exceptions without needing to be aware of
// the EH ABI, exception types, etc. Returns true if the given function completed
// successfully and false otherwise. This error trap checks for SuperPMI exceptions
virtual bool runWithSPMIErrorTrap(
- errorTrapFunction function, // The function to run
- void* parameter // The context parameter that will be passed to the function and the handler
- ) = 0;
+ errorTrapFunction function, // The function to run
+ void* parameter // The context parameter that will be passed to the function and the handler
+ ) = 0;
/*****************************************************************************
* ICorStaticInfo contains EE interface methods which return values that are
@@ -2919,8 +2877,8 @@ class ICorStaticInfo
// Return details about EE internal data structures
virtual void getEEInfo(
- CORINFO_EE_INFO *pEEInfoOut
- ) = 0;
+ CORINFO_EE_INFO *pEEInfoOut
+ ) = 0;
// Returns name of the JIT timer log
virtual const char16_t *getJitTimeLogFilename() = 0;
@@ -2944,7 +2902,8 @@ class ICorStaticInfo
CORINFO_METHOD_HANDLE ftn,
char* buffer,
size_t bufferSize,
- size_t* pRequiredBufferSize = nullptr) = 0;
+ size_t* pRequiredBufferSize = nullptr
+ ) = 0;
// Return method name as in metadata, or nullptr if there is none,
// and optionally return the class, enclosing class, and namespace names
@@ -2954,7 +2913,7 @@ class ICorStaticInfo
CORINFO_METHOD_HANDLE ftn, /* IN */
const char **className, /* OUT */
const char **namespaceName, /* OUT */
- const char **enclosingClassName /* OUT */
+ const char **enclosingClassName /* OUT */
) = 0;
// this function is for debugging only. It returns a value that
@@ -2964,19 +2923,11 @@ class ICorStaticInfo
CORINFO_METHOD_HANDLE ftn /* IN */
) = 0;
- // this function is for debugging only.
- virtual size_t findNameOfToken (
- CORINFO_MODULE_HANDLE module, /* IN */
- mdToken metaTOK, /* IN */
- _Out_writes_ (FQNameCapacity) char * szFQName, /* OUT */
- size_t FQNameCapacity /* IN */
- ) = 0;
-
// returns whether the struct is enregisterable. Only valid on a System V VM. Returns true on success, false on failure.
virtual bool getSystemVAmd64PassStructInRegisterDescriptor(
- /* IN */ CORINFO_CLASS_HANDLE structHnd,
- /* OUT */ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr
- ) = 0;
+ CORINFO_CLASS_HANDLE structHnd, /* IN */
+ SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr /* OUT */
+ ) = 0;
virtual uint32_t getLoongArch64PassStructInRegisterFlags(CORINFO_CLASS_HANDLE cls) = 0;
virtual uint32_t getRISCV64PassStructInRegisterFlags(CORINFO_CLASS_HANDLE cls) = 0;
@@ -3015,70 +2966,68 @@ class ICorDynamicInfo : public ICorStaticInfo
// Return details about EE internal data structures
virtual uint32_t getThreadTLSIndex(
- void **ppIndirection = NULL
- ) = 0;
-
- virtual const void * getInlinedCallFrameVptr(
- void **ppIndirection = NULL
- ) = 0;
+ void **ppIndirection = NULL
+ ) = 0;
virtual int32_t * getAddrOfCaptureThreadGlobal(
- void **ppIndirection = NULL
- ) = 0;
+ void **ppIndirection = NULL
+ ) = 0;
// return the native entry point to an EE helper (see CorInfoHelpFunc)
virtual void* getHelperFtn (
- CorInfoHelpFunc ftnNum,
- void **ppIndirection = NULL
- ) = 0;
+ CorInfoHelpFunc ftnNum,
+ void **ppIndirection = NULL
+ ) = 0;
// return a callable address of the function (native code). This function
// may return a different value (depending on whether the method has
// been JITed or not.
virtual void getFunctionEntryPoint(
- CORINFO_METHOD_HANDLE ftn, /* IN */
- CORINFO_CONST_LOOKUP * pResult, /* OUT */
- CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_ANY) = 0;
+ CORINFO_METHOD_HANDLE ftn, /* IN */
+ CORINFO_CONST_LOOKUP * pResult, /* OUT */
+ CORINFO_ACCESS_FLAGS accessFlags = CORINFO_ACCESS_ANY
+ ) = 0;
// return a directly callable address. This can be used similarly to the
// value returned by getFunctionEntryPoint() except that it is
// guaranteed to be multi callable entrypoint.
virtual void getFunctionFixedEntryPoint(
- CORINFO_METHOD_HANDLE ftn,
- bool isUnsafeFunctionPointer,
- CORINFO_CONST_LOOKUP * pResult) = 0;
+ CORINFO_METHOD_HANDLE ftn,
+ bool isUnsafeFunctionPointer,
+ CORINFO_CONST_LOOKUP * pResult
+ ) = 0;
// get the synchronization handle that is passed to monXstatic function
virtual void* getMethodSync(
- CORINFO_METHOD_HANDLE ftn,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_METHOD_HANDLE ftn,
+ void** ppIndirection = NULL
+ ) = 0;
// get slow lazy string literal helper to use (CORINFO_HELP_STRCNS*).
// Returns CORINFO_HELP_UNDEF if lazy string literal helper cannot be used.
virtual CorInfoHelpFunc getLazyStringLiteralHelper(
- CORINFO_MODULE_HANDLE handle
- ) = 0;
+ CORINFO_MODULE_HANDLE handle
+ ) = 0;
virtual CORINFO_MODULE_HANDLE embedModuleHandle(
- CORINFO_MODULE_HANDLE handle,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_MODULE_HANDLE handle,
+ void **ppIndirection = NULL
+ ) = 0;
virtual CORINFO_CLASS_HANDLE embedClassHandle(
- CORINFO_CLASS_HANDLE handle,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_CLASS_HANDLE handle,
+ void **ppIndirection = NULL
+ ) = 0;
virtual CORINFO_METHOD_HANDLE embedMethodHandle(
- CORINFO_METHOD_HANDLE handle,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_METHOD_HANDLE handle,
+ void **ppIndirection = NULL
+ ) = 0;
virtual CORINFO_FIELD_HANDLE embedFieldHandle(
- CORINFO_FIELD_HANDLE handle,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_FIELD_HANDLE handle,
+ void **ppIndirection = NULL
+ ) = 0;
// Given a module scope (module), a method handle (context) and
// a metadata token (metaTOK), fetch the handle
@@ -3088,9 +3037,10 @@ class ICorDynamicInfo : public ICorStaticInfo
// then indicate how the handle should be looked up at run-time.
//
virtual void embedGenericHandle(
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- bool fEmbedParent, // TRUE - embeds parent type handle of the field/method handle
- CORINFO_GENERICHANDLE_RESULT * pResult) = 0;
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ bool fEmbedParent, // `true` - embeds parent type handle of the field/method handle
+ CORINFO_GENERICHANDLE_RESULT * pResult
+ ) = 0;
// Return information used to locate the exact enclosing type of the current method.
// Used only to invoke .cctor method from code shared across generic instantiations
@@ -3100,76 +3050,69 @@ class ICorDynamicInfo : public ICorStaticInfo
// CORINFO_LOOKUP_CLASSPARAM use vtable hidden param
// CORINFO_LOOKUP_METHODPARAM use enclosing type of method-desc hidden param
virtual void getLocationOfThisType(
- CORINFO_METHOD_HANDLE context,
- CORINFO_LOOKUP_KIND* pLookupKind
- ) = 0;
+ CORINFO_METHOD_HANDLE context,
+ CORINFO_LOOKUP_KIND* pLookupKind
+ ) = 0;
// return the address of the PInvoke target. May be a fixup area in the
// case of late-bound PInvoke calls.
virtual void getAddressOfPInvokeTarget(
- CORINFO_METHOD_HANDLE method,
- CORINFO_CONST_LOOKUP *pLookup
- ) = 0;
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_CONST_LOOKUP * pLookup
+ ) = 0;
// Generate a cookie based on the signature that would needs to be passed
// to CORINFO_HELP_PINVOKE_CALLI
virtual void* GetCookieForPInvokeCalliSig(
- CORINFO_SIG_INFO* szMetaSig,
- void ** ppIndirection = NULL
+ CORINFO_SIG_INFO* szMetaSig,
+ void** ppIndirection = NULL
) = 0;
// returns true if a VM cookie can be generated for it (might be false due to cross-module
// inlining, in which case the inlining should be aborted)
virtual bool canGetCookieForPInvokeCalliSig(
- CORINFO_SIG_INFO* szMetaSig
- ) = 0;
+ CORINFO_SIG_INFO* szMetaSig
+ ) = 0;
// Gets a handle that is checked to see if the current method is
// included in "JustMyCode"
virtual CORINFO_JUST_MY_CODE_HANDLE getJustMyCodeHandle(
- CORINFO_METHOD_HANDLE method,
- CORINFO_JUST_MY_CODE_HANDLE**ppIndirection = NULL
- ) = 0;
+ CORINFO_METHOD_HANDLE method,
+ CORINFO_JUST_MY_CODE_HANDLE** ppIndirection = NULL
+ ) = 0;
// Gets a method handle that can be used to correlate profiling data.
// This is the IP of a native method, or the address of the descriptor struct
// for IL. Always guaranteed to be unique per process, and not to move. */
virtual void GetProfilingHandle(
- bool *pbHookFunction,
- void **pProfilerHandle,
- bool *pbIndirectedHandles
- ) = 0;
+ bool *pbHookFunction,
+ void **pProfilerHandle,
+ bool *pbIndirectedHandles
+ ) = 0;
// Returns instructions on how to make the call. See code:CORINFO_CALL_INFO for possible return values.
virtual void getCallInfo(
- // Token info
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
-
- //Generics info
- CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
-
- //Security info
- CORINFO_METHOD_HANDLE callerHandle,
+ // Token info
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
- //Jit info
- CORINFO_CALLINFO_FLAGS flags,
+ // Generics info
+ CORINFO_RESOLVED_TOKEN * pConstrainedResolvedToken,
- //out params
- CORINFO_CALL_INFO *pResult
- ) = 0;
+ // Security info
+ CORINFO_METHOD_HANDLE callerHandle,
- virtual bool canAccessFamily(CORINFO_METHOD_HANDLE hCaller,
- CORINFO_CLASS_HANDLE hInstanceType) = 0;
+ // Jit info
+ CORINFO_CALLINFO_FLAGS flags,
- // Returns TRUE if the Class Domain ID is the RID of the class (currently true for every class
- // except reflection emitted classes and generics)
- virtual bool isRIDClassDomainID(CORINFO_CLASS_HANDLE cls) = 0;
+ // out params
+ CORINFO_CALL_INFO *pResult
+ ) = 0;
- // returns the class's domain ID for accessing shared statics
+ // Returns the class's domain ID for accessing shared statics
virtual unsigned getClassDomainID (
- CORINFO_CLASS_HANDLE cls,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_CLASS_HANDLE cls,
+ void **ppIndirection = NULL
+ ) = 0;
//------------------------------------------------------------------------------
// getStaticFieldContent: returns true and the actual field's value if the given
@@ -3185,19 +3128,19 @@ class ICorDynamicInfo : public ICorStaticInfo
// Returns true if field's constant value was available and successfully copied to buffer
//
virtual bool getStaticFieldContent(
- CORINFO_FIELD_HANDLE field,
- uint8_t *buffer,
- int bufferSize,
- int valueOffset = 0,
- bool ignoreMovableObjects = true
- ) = 0;
+ CORINFO_FIELD_HANDLE field,
+ uint8_t *buffer,
+ int bufferSize,
+ int valueOffset = 0,
+ bool ignoreMovableObjects = true
+ ) = 0;
virtual bool getObjectContent(
- CORINFO_OBJECT_HANDLE obj,
- uint8_t* buffer,
- int bufferSize,
- int valueOffset
- ) = 0;
+ CORINFO_OBJECT_HANDLE obj,
+ uint8_t* buffer,
+ int bufferSize,
+ int valueOffset
+ ) = 0;
// If pIsSpeculative is NULL, return the class handle for the value of ref-class typed
// static readonly fields, if there is a unique location for the static and the class
@@ -3211,40 +3154,40 @@ class ICorDynamicInfo : public ICorStaticInfo
// is readonly but class has not yet finished initialization). Set *pIsSpeculative false
// if this type will not change.
virtual CORINFO_CLASS_HANDLE getStaticFieldCurrentClass(
- CORINFO_FIELD_HANDLE field,
- bool *pIsSpeculative = NULL
- ) = 0;
+ CORINFO_FIELD_HANDLE field,
+ bool *pIsSpeculative = NULL
+ ) = 0;
// registers a vararg sig & returns a VM cookie for it (which can contain other stuff)
virtual CORINFO_VARARGS_HANDLE getVarArgsHandle(
- CORINFO_SIG_INFO *pSig,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_SIG_INFO *pSig,
+ void **ppIndirection = NULL
+ ) = 0;
// returns true if a VM cookie can be generated for it (might be false due to cross-module
// inlining, in which case the inlining should be aborted)
virtual bool canGetVarArgsHandle(
- CORINFO_SIG_INFO *pSig
- ) = 0;
+ CORINFO_SIG_INFO *pSig
+ ) = 0;
// Allocate a string literal on the heap and return a handle to it
virtual InfoAccessType constructStringLiteral(
- CORINFO_MODULE_HANDLE module,
- mdToken metaTok,
- void **ppValue
- ) = 0;
+ CORINFO_MODULE_HANDLE module,
+ mdToken metaTok,
+ void **ppValue
+ ) = 0;
virtual InfoAccessType emptyStringLiteral(
- void **ppValue
- ) = 0;
+ void **ppValue
+ ) = 0;
// (static fields only) given that 'field' refers to thread local store,
// return the ID (TLS index), which is used to find the beginning of the
// TLS data area for the particular DLL 'field' is associated with.
virtual uint32_t getFieldThreadLocalStoreID (
- CORINFO_FIELD_HANDLE field,
- void **ppIndirection = NULL
- ) = 0;
+ CORINFO_FIELD_HANDLE field,
+ void **ppIndirection = NULL
+ ) = 0;
virtual CORINFO_METHOD_HANDLE GetDelegateCtor(
CORINFO_METHOD_HANDLE methHnd,
@@ -3254,34 +3197,34 @@ class ICorDynamicInfo : public ICorStaticInfo
) = 0;
virtual void MethodCompileComplete(
- CORINFO_METHOD_HANDLE methHnd
- ) = 0;
+ CORINFO_METHOD_HANDLE methHnd
+ ) = 0;
// Obtain tailcall help for the specified call site.
virtual bool getTailCallHelpers(
+ // The resolved token for the call. Can be null for calli.
+ CORINFO_RESOLVED_TOKEN* callToken,
- // The resolved token for the call. Can be null for calli.
- CORINFO_RESOLVED_TOKEN* callToken,
+ // The signature at the callsite.
+ CORINFO_SIG_INFO* sig,
- // The signature at the callsite.
- CORINFO_SIG_INFO* sig,
+ // Flags for the tailcall site.
+ CORINFO_GET_TAILCALL_HELPERS_FLAGS flags,
- // Flags for the tailcall site.
- CORINFO_GET_TAILCALL_HELPERS_FLAGS flags,
-
- // The resulting help.
- CORINFO_TAILCALL_HELPERS* pResult) = 0;
+ // The resulting help.
+ CORINFO_TAILCALL_HELPERS* pResult
+ ) = 0;
// Optionally, convert calli to regular method call. This is for PInvoke argument marshalling.
virtual bool convertPInvokeCalliToCall(
- CORINFO_RESOLVED_TOKEN * pResolvedToken,
- bool fMustConvert
- ) = 0;
+ CORINFO_RESOLVED_TOKEN * pResolvedToken,
+ bool fMustConvert
+ ) = 0;
// Notify EE about intent to use or not to use instruction set in the method. Returns true if the instruction set is supported unconditionally.
virtual bool notifyInstructionSetUsage(
- CORINFO_InstructionSet instructionSet,
- bool supportEnabled
+ CORINFO_InstructionSet instructionSet,
+ bool supportEnabled
) = 0;
// Notify EE that JIT needs an entry-point that is tail-callable.
diff --git a/src/coreclr/inc/corjit.h b/src/coreclr/inc/corjit.h
index 344ca9a82bdd4..9cee9b1742125 100644
--- a/src/coreclr/inc/corjit.h
+++ b/src/coreclr/inc/corjit.h
@@ -191,11 +191,11 @@ class ICorJitCompiler
// * For the 64 bit jit this is implemented by code:PreJit.compileMethod
// Note: setTargetOS must be called before this api is used.
virtual CorJitResult compileMethod (
- ICorJitInfo *comp, /* IN */
- struct CORINFO_METHOD_INFO *info, /* IN */
- unsigned /* code:CorJitFlag */ flags, /* IN */
- uint8_t **nativeEntry, /* OUT */
- uint32_t *nativeSizeOfCode /* OUT */
+ ICorJitInfo* comp, /* IN */
+ struct CORINFO_METHOD_INFO* info, /* IN */
+ unsigned /* code:CorJitFlag */ flags, /* IN */
+ uint8_t** nativeEntry, /* OUT */
+ uint32_t* nativeSizeOfCode /* OUT */
) = 0;
// Do any appropriate work at process shutdown. Default impl is to do nothing.
@@ -251,7 +251,7 @@ class ICorJitInfo : public ICorDynamicInfo
virtual void reserveUnwindInfo (
bool isFunclet, /* IN */
bool isColdCode, /* IN */
- uint32_t unwindSize /* IN */
+ uint32_t unwindSize /* IN */
) = 0;
// Allocate and initialize the .rdata and .pdata for this method or
@@ -272,19 +272,18 @@ class ICorJitInfo : public ICorDynamicInfo
// funcKind type of funclet (main method code, handler, filter)
//
virtual void allocUnwindInfo (
- uint8_t * pHotCode, /* IN */
- uint8_t * pColdCode, /* IN */
- uint32_t startOffset, /* IN */
- uint32_t endOffset, /* IN */
- uint32_t unwindSize, /* IN */
- uint8_t * pUnwindBlock, /* IN */
- CorJitFuncKind funcKind /* IN */
+ uint8_t * pHotCode, /* IN */
+ uint8_t * pColdCode, /* IN */
+ uint32_t startOffset, /* IN */
+ uint32_t endOffset, /* IN */
+ uint32_t unwindSize, /* IN */
+ uint8_t * pUnwindBlock, /* IN */
+ CorJitFuncKind funcKind /* IN */
) = 0;
- // Get a block of memory needed for the code manager information,
- // (the info for enumerating the GC pointers while crawling the
- // stack frame).
- // Note that allocMem must be called first
+ // Get a block of memory needed for the code manager information,
+ // (the info for enumerating the GC pointers while crawling the
+ // stack frame). Note that allocMem must be called first.
virtual void * allocGCInfo (
size_t size /* IN */
) = 0;
@@ -293,7 +292,7 @@ class ICorJitInfo : public ICorDynamicInfo
// This is guaranteed to be called before any 'setEHinfo' call.
// Note that allocMem must be called before this method can be called.
virtual void setEHcount (
- unsigned cEH /* IN */
+ unsigned cEH /* IN */
) = 0;
// Set the values for one particular exception handler block.
@@ -303,7 +302,7 @@ class ICorJitInfo : public ICorDynamicInfo
// determine if a "finally" clause is executing.
virtual void setEHinfo (
unsigned EHnumber, /* IN */
- const CORINFO_EH_CLAUSE *clause /* IN */
+ const CORINFO_EH_CLAUSE* clause /* IN */
) = 0;
    // Level 1 -> fatalError, Level 2 -> Error, Level 3 -> Warning
@@ -463,20 +462,19 @@ class ICorJitInfo : public ICorDynamicInfo
// the call site has no signature information (e.g. a helper call) or has no method handle
// (e.g. a CALLI P/Invoke), then null should be passed instead.
virtual void recordCallSite(
- uint32_t instrOffset, /* IN */
- CORINFO_SIG_INFO * callSig, /* IN */
- CORINFO_METHOD_HANDLE methodHandle /* IN */
+ uint32_t instrOffset, /* IN */
+ CORINFO_SIG_INFO * callSig, /* IN */
+ CORINFO_METHOD_HANDLE methodHandle /* IN */
) = 0;
// A relocation is recorded if we are pre-jitting.
// A jump thunk may be inserted if we are jitting
virtual void recordRelocation(
- void * location, /* IN */
- void * locationRW, /* IN */
- void * target, /* IN */
- uint16_t fRelocType, /* IN */
- uint16_t slotNum = 0, /* IN */
- int32_t addlDelta = 0 /* IN */
+ void * location, /* IN */
+ void * locationRW, /* IN */
+ void * target, /* IN */
+ uint16_t fRelocType, /* IN */
+ int32_t addlDelta = 0 /* IN */
) = 0;
virtual uint16_t getRelocTypeHint(void * target) = 0;
@@ -491,9 +489,9 @@ class ICorJitInfo : public ICorDynamicInfo
// Fetches extended flags for a particular compilation instance. Returns
// the number of bytes written to the provided buffer.
virtual uint32_t getJitFlags(
- CORJIT_FLAGS* flags, /* IN: Points to a buffer that will hold the extended flags. */
- uint32_t sizeInBytes /* IN: The size of the buffer. Note that this is effectively a
- version number for the CORJIT_FLAGS value. */
+ CORJIT_FLAGS* flags, /* IN: Points to a buffer that will hold the extended flags. */
+ uint32_t sizeInBytes /* IN: The size of the buffer. Note that this is effectively a
+ version number for the CORJIT_FLAGS value. */
) = 0;
};
diff --git a/src/coreclr/inc/corjitflags.h b/src/coreclr/inc/corjitflags.h
index 6f7b00cb6f919..dce9a9f00b399 100644
--- a/src/coreclr/inc/corjitflags.h
+++ b/src/coreclr/inc/corjitflags.h
@@ -22,97 +22,52 @@ class CORJIT_FLAGS
{
public:
+ // Note: these flags can be #ifdef'ed, but no number should be re-used between different #ifdef conditions,
+ // so platform-independent code can know uniquely which number corresponds to which flag.
enum CorJitFlag
{
CORJIT_FLAG_CALL_GETJITFLAGS = 0xffffffff, // Indicates that the JIT should retrieve flags in the form of a
// pointer to a CORJIT_FLAGS value via ICorJitInfo::getJitFlags().
- CORJIT_FLAG_SPEED_OPT = 0,
- CORJIT_FLAG_SIZE_OPT = 1,
+
+ CORJIT_FLAG_SPEED_OPT = 0, // optimize for speed
+ CORJIT_FLAG_SIZE_OPT = 1, // optimize for code size
CORJIT_FLAG_DEBUG_CODE = 2, // generate "debuggable" code (no code-mangling optimizations)
CORJIT_FLAG_DEBUG_EnC = 3, // We are in Edit-n-Continue mode
CORJIT_FLAG_DEBUG_INFO = 4, // generate line and local-var info
CORJIT_FLAG_MIN_OPT = 5, // disable all jit optimizations (not necessarily debuggable code)
- CORJIT_FLAG_ENABLE_CFG = 6, // generate control-flow guard checks
- CORJIT_FLAG_MCJIT_BACKGROUND = 7, // Calling from multicore JIT background thread, do not call JitComplete
-
- #if defined(TARGET_X86)
- CORJIT_FLAG_PINVOKE_RESTORE_ESP = 8, // Restore ESP after returning from inlined PInvoke
- #else // !defined(TARGET_X86)
- CORJIT_FLAG_UNUSED2 = 8,
- #endif // !defined(TARGET_X86)
-
- CORJIT_FLAG_UNUSED3 = 9,
- CORJIT_FLAG_UNUSED4 = 10,
- CORJIT_FLAG_UNUSED5 = 11,
- CORJIT_FLAG_UNUSED6 = 12,
-
- CORJIT_FLAG_OSR = 13, // Generate alternate method for On Stack Replacement
-
- CORJIT_FLAG_ALT_JIT = 14, // JIT should consider itself an ALT_JIT
- CORJIT_FLAG_FROZEN_ALLOC_ALLOWED = 15, // JIT is allowed to use *_MAYBEFROZEN allocators
- CORJIT_FLAG_UNUSED9 = 16,
- CORJIT_FLAG_UNUSED10 = 17,
-
- CORJIT_FLAG_MAKEFINALCODE = 18, // Use the final code generator, i.e., not the interpreter.
- CORJIT_FLAG_READYTORUN = 19, // Use version-resilient code generation
- CORJIT_FLAG_PROF_ENTERLEAVE = 20, // Instrument prologues/epilogues
- CORJIT_FLAG_UNUSED11 = 21,
- CORJIT_FLAG_PROF_NO_PINVOKE_INLINE = 22, // Disables PInvoke inlining
- CORJIT_FLAG_UNUSED12 = 23,
- CORJIT_FLAG_PREJIT = 24, // jit or prejit is the execution engine.
- CORJIT_FLAG_RELOC = 25, // Generate relocatable code
- CORJIT_FLAG_UNUSED13 = 26,
- CORJIT_FLAG_IL_STUB = 27, // method is an IL stub
- CORJIT_FLAG_PROCSPLIT = 28, // JIT should separate code into hot and cold sections
- CORJIT_FLAG_BBINSTR = 29, // Collect basic block profile information
- CORJIT_FLAG_BBOPT = 30, // Optimize method based on profile information
- CORJIT_FLAG_FRAMED = 31, // All methods have an EBP frame
- CORJIT_FLAG_BBINSTR_IF_LOOPS = 32, // JIT must instrument current method if it has loops
- CORJIT_FLAG_PUBLISH_SECRET_PARAM = 33, // JIT must place stub secret param into local 0. (used by IL stubs)
- CORJIT_FLAG_UNUSED14 = 34,
- CORJIT_FLAG_UNUSED37 = 35,
- CORJIT_FLAG_USE_PINVOKE_HELPERS = 36, // The JIT should use the PINVOKE_{BEGIN,END} helpers instead of emitting inline transitions
- CORJIT_FLAG_REVERSE_PINVOKE = 37, // The JIT should insert REVERSE_PINVOKE_{ENTER,EXIT} helpers into method prolog/epilog
- CORJIT_FLAG_TRACK_TRANSITIONS = 38, // The JIT should insert the REVERSE_PINVOKE helper variants that track transitions.
- CORJIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible
- CORJIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code
+ CORJIT_FLAG_ENABLE_CFG = 6, // generate CFG enabled code
+ CORJIT_FLAG_OSR = 7, // Generate alternate version for On Stack Replacement
+ CORJIT_FLAG_ALT_JIT = 8, // JIT should consider itself an ALT_JIT
+ CORJIT_FLAG_FROZEN_ALLOC_ALLOWED = 9, // JIT is allowed to use *_MAYBEFROZEN allocators
+ CORJIT_FLAG_MAKEFINALCODE = 10, // Use the final code generator, i.e., not the interpreter.
+ CORJIT_FLAG_READYTORUN = 11, // Use version-resilient code generation
+ CORJIT_FLAG_PROF_ENTERLEAVE = 12, // Instrument prologues/epilogues
+ CORJIT_FLAG_PROF_NO_PINVOKE_INLINE = 13, // Disables PInvoke inlining
+ CORJIT_FLAG_PREJIT = 14, // prejit is the execution engine.
+ CORJIT_FLAG_RELOC = 15, // Generate relocatable code
+ CORJIT_FLAG_IL_STUB = 16, // method is an IL stub
+ CORJIT_FLAG_PROCSPLIT = 17, // JIT should separate code into hot and cold sections
+ CORJIT_FLAG_BBINSTR = 18, // Collect basic block profile information
+ CORJIT_FLAG_BBINSTR_IF_LOOPS = 19, // JIT must instrument current method if it has loops
+ CORJIT_FLAG_BBOPT = 20, // Optimize method based on profile information
+ CORJIT_FLAG_FRAMED = 21, // All methods have an EBP frame
+ CORJIT_FLAG_PUBLISH_SECRET_PARAM = 22, // JIT must place stub secret param into local 0. (used by IL stubs)
+ CORJIT_FLAG_USE_PINVOKE_HELPERS = 23, // The JIT should use the PINVOKE_{BEGIN,END} helpers instead of emitting inline transitions
+ CORJIT_FLAG_REVERSE_PINVOKE = 24, // The JIT should insert REVERSE_PINVOKE_{ENTER,EXIT} helpers into method prolog/epilog
+ CORJIT_FLAG_TRACK_TRANSITIONS = 25, // The JIT should insert the REVERSE_PINVOKE helper variants that track transitions.
+ CORJIT_FLAG_TIER0 = 26, // This is the initial tier for tiered compilation which should generate code as quickly as possible
+ CORJIT_FLAG_TIER1 = 27, // This is the final tier (for now) for tiered compilation which should generate high quality code
+ CORJIT_FLAG_NO_INLINING = 28, // JIT should not inline any called method into this method
#if defined(TARGET_ARM)
- CORJIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
-#else // !defined(TARGET_ARM)
- CORJIT_FLAG_UNUSED15 = 41,
-#endif // !defined(TARGET_ARM)
+ CORJIT_FLAG_RELATIVE_CODE_RELOCS = 29, // JIT should generate PC-relative address computations instead of EE relocation records
+ CORJIT_FLAG_SOFTFP_ABI = 30, // Enable armel calling convention
+#endif
- CORJIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
+ CORJIT_FLAG_VECTOR512_THROTTLING = 31, // On x86/x64, 512-bit vector usage may incur CPU frequency throttling
+#endif
-#if defined(TARGET_ARM)
- CORJIT_FLAG_SOFTFP_ABI = 43, // On ARM should enable armel calling convention
-#elif defined(TARGET_X86) || defined(TARGET_AMD64)
- CORJIT_FLAG_VECTOR512_THROTTLING = 43, // On Xarch, 512-bit vector usage may incur CPU frequency throttling
-#else
- CORJIT_FLAG_UNUSED16 = 43,
-#endif // !defined(TARGET_ARM)
-
- CORJIT_FLAG_UNUSED17 = 44,
- CORJIT_FLAG_UNUSED18 = 45,
- CORJIT_FLAG_UNUSED19 = 46,
- CORJIT_FLAG_UNUSED20 = 47,
- CORJIT_FLAG_UNUSED21 = 48,
- CORJIT_FLAG_UNUSED22 = 49,
- CORJIT_FLAG_UNUSED23 = 50,
- CORJIT_FLAG_UNUSED24 = 51,
- CORJIT_FLAG_UNUSED25 = 52,
- CORJIT_FLAG_UNUSED26 = 53,
- CORJIT_FLAG_UNUSED27 = 54,
- CORJIT_FLAG_UNUSED28 = 55,
- CORJIT_FLAG_UNUSED29 = 56,
- CORJIT_FLAG_UNUSED30 = 57,
- CORJIT_FLAG_UNUSED31 = 58,
- CORJIT_FLAG_UNUSED32 = 59,
- CORJIT_FLAG_UNUSED33 = 60,
- CORJIT_FLAG_UNUSED34 = 61,
- CORJIT_FLAG_UNUSED35 = 62,
- CORJIT_FLAG_UNUSED36 = 63
};
CORJIT_FLAGS()
diff --git a/src/coreclr/inc/icorjitinfoimpl_generated.h b/src/coreclr/inc/icorjitinfoimpl_generated.h
index f3d93194df258..5a7b737b85168 100644
--- a/src/coreclr/inc/icorjitinfoimpl_generated.h
+++ b/src/coreclr/inc/icorjitinfoimpl_generated.h
@@ -75,9 +75,6 @@ void getEHinfo(
CORINFO_CLASS_HANDLE getMethodClass(
CORINFO_METHOD_HANDLE method) override;
-CORINFO_MODULE_HANDLE getMethodModule(
- CORINFO_METHOD_HANDLE method) override;
-
void getMethodVTableOffset(
CORINFO_METHOD_HANDLE method,
unsigned* offsetOfIndirection,
@@ -117,13 +114,6 @@ bool satisfiesMethodConstraints(
CORINFO_CLASS_HANDLE parent,
CORINFO_METHOD_HANDLE method) override;
-bool isCompatibleDelegate(
- CORINFO_CLASS_HANDLE objCls,
- CORINFO_CLASS_HANDLE methodParentCls,
- CORINFO_METHOD_HANDLE method,
- CORINFO_CLASS_HANDLE delegateCls,
- bool* pfIsOpenDelegate) override;
-
void methodMustBeLoadedBeforeCodeIsRun(
CORINFO_METHOD_HANDLE method) override;
@@ -143,9 +133,6 @@ PatchpointInfo* getOSRInfo(
void resolveToken(
CORINFO_RESOLVED_TOKEN* pResolvedToken) override;
-bool tryResolveToken(
- CORINFO_RESOLVED_TOKEN* pResolvedToken) override;
-
void findSig(
CORINFO_MODULE_HANDLE module,
unsigned sigTOK,
@@ -161,14 +148,6 @@ void findCallSiteSig(
CORINFO_CLASS_HANDLE getTokenTypeAsHandle(
CORINFO_RESOLVED_TOKEN* pResolvedToken) override;
-bool isValidToken(
- CORINFO_MODULE_HANDLE module,
- unsigned metaTOK) override;
-
-bool isValidStringRef(
- CORINFO_MODULE_HANDLE module,
- unsigned metaTOK) override;
-
int getStringLiteral(
CORINFO_MODULE_HANDLE module,
unsigned metaTOK,
@@ -347,10 +326,6 @@ TypeCompareState compareTypesForEquality(
CORINFO_CLASS_HANDLE cls1,
CORINFO_CLASS_HANDLE cls2) override;
-CORINFO_CLASS_HANDLE mergeClasses(
- CORINFO_CLASS_HANDLE cls1,
- CORINFO_CLASS_HANDLE cls2) override;
-
bool isMoreSpecificType(
CORINFO_CLASS_HANDLE cls1,
CORINFO_CLASS_HANDLE cls2) override;
@@ -366,9 +341,6 @@ CorInfoType getChildType(
CORINFO_CLASS_HANDLE clsHnd,
CORINFO_CLASS_HANDLE* clsRet) override;
-bool satisfiesClassConstraints(
- CORINFO_CLASS_HANDLE cls) override;
-
bool isSDArray(
CORINFO_CLASS_HANDLE cls) override;
@@ -509,12 +481,6 @@ const char* getMethodNameFromMetadata(
unsigned getMethodHash(
CORINFO_METHOD_HANDLE ftn) override;
-size_t findNameOfToken(
- CORINFO_MODULE_HANDLE moduleHandle,
- mdToken token,
- char* szFQName,
- size_t FQNameCapacity) override;
-
bool getSystemVAmd64PassStructInRegisterDescriptor(
CORINFO_CLASS_HANDLE structHnd,
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr) override;
@@ -528,9 +494,6 @@ uint32_t getRISCV64PassStructInRegisterFlags(
uint32_t getThreadTLSIndex(
void** ppIndirection) override;
-const void* getInlinedCallFrameVptr(
- void** ppIndirection) override;
-
int32_t* getAddrOfCaptureThreadGlobal(
void** ppIndirection) override;
@@ -607,13 +570,6 @@ void getCallInfo(
CORINFO_CALLINFO_FLAGS flags,
CORINFO_CALL_INFO* pResult) override;
-bool canAccessFamily(
- CORINFO_METHOD_HANDLE hCaller,
- CORINFO_CLASS_HANDLE hInstanceType) override;
-
-bool isRIDClassDomainID(
- CORINFO_CLASS_HANDLE cls) override;
-
unsigned getClassDomainID(
CORINFO_CLASS_HANDLE cls,
void** ppIndirection) override;
@@ -743,7 +699,6 @@ void recordRelocation(
void* locationRW,
void* target,
uint16_t fRelocType,
- uint16_t slotNum,
int32_t addlDelta) override;
uint16_t getRelocTypeHint(
diff --git a/src/coreclr/inc/jiteeversionguid.h b/src/coreclr/inc/jiteeversionguid.h
index ebcf4919cb6af..36f906df5e7ed 100644
--- a/src/coreclr/inc/jiteeversionguid.h
+++ b/src/coreclr/inc/jiteeversionguid.h
@@ -43,11 +43,11 @@ typedef const GUID *LPCGUID;
#define GUID_DEFINED
#endif // !GUID_DEFINED
-constexpr GUID JITEEVersionIdentifier = { /* fda2f9dd-6b3e-4ecd-a7b8-79e5edf1f072 */
- 0xfda2f9dd,
- 0x6b3e,
- 0x4ecd,
- {0xa7, 0xb8, 0x79, 0xe5, 0xed, 0xf1, 0xf0, 0x72}
+constexpr GUID JITEEVersionIdentifier = { /* 88398e9f-093a-4212-85bc-bebb8c14cd24 */
+ 0x88398e9f,
+ 0x093a,
+ 0x4212,
+ {0x85, 0xbc, 0xbe, 0xbb, 0x8c, 0x14, 0xcd, 0x24}
};
//////////////////////////////////////////////////////////////////////////////////////////////////////////
diff --git a/src/coreclr/jit/ICorJitInfo_names_generated.h b/src/coreclr/jit/ICorJitInfo_names_generated.h
index b93f42dfba8ff..7a7f975e50f99 100644
--- a/src/coreclr/jit/ICorJitInfo_names_generated.h
+++ b/src/coreclr/jit/ICorJitInfo_names_generated.h
@@ -16,7 +16,6 @@ DEF_CLR_API(canTailCall)
DEF_CLR_API(reportTailCallDecision)
DEF_CLR_API(getEHinfo)
DEF_CLR_API(getMethodClass)
-DEF_CLR_API(getMethodModule)
DEF_CLR_API(getMethodVTableOffset)
DEF_CLR_API(resolveVirtualMethod)
DEF_CLR_API(getUnboxedEntry)
@@ -27,19 +26,15 @@ DEF_CLR_API(isIntrinsicType)
DEF_CLR_API(getUnmanagedCallConv)
DEF_CLR_API(pInvokeMarshalingRequired)
DEF_CLR_API(satisfiesMethodConstraints)
-DEF_CLR_API(isCompatibleDelegate)
DEF_CLR_API(methodMustBeLoadedBeforeCodeIsRun)
DEF_CLR_API(mapMethodDeclToMethodImpl)
DEF_CLR_API(getGSCookie)
DEF_CLR_API(setPatchpointInfo)
DEF_CLR_API(getOSRInfo)
DEF_CLR_API(resolveToken)
-DEF_CLR_API(tryResolveToken)
DEF_CLR_API(findSig)
DEF_CLR_API(findCallSiteSig)
DEF_CLR_API(getTokenTypeAsHandle)
-DEF_CLR_API(isValidToken)
-DEF_CLR_API(isValidStringRef)
DEF_CLR_API(getStringLiteral)
DEF_CLR_API(printObjectDescription)
DEF_CLR_API(asCorInfoType)
@@ -86,12 +81,10 @@ DEF_CLR_API(getTypeForPrimitiveNumericClass)
DEF_CLR_API(canCast)
DEF_CLR_API(compareTypesForCast)
DEF_CLR_API(compareTypesForEquality)
-DEF_CLR_API(mergeClasses)
DEF_CLR_API(isMoreSpecificType)
DEF_CLR_API(isEnum)
DEF_CLR_API(getParentType)
DEF_CLR_API(getChildType)
-DEF_CLR_API(satisfiesClassConstraints)
DEF_CLR_API(isSDArray)
DEF_CLR_API(getArrayRank)
DEF_CLR_API(getArrayIntrinsicID)
@@ -126,12 +119,10 @@ DEF_CLR_API(getMethodDefFromMethod)
DEF_CLR_API(printMethodName)
DEF_CLR_API(getMethodNameFromMetadata)
DEF_CLR_API(getMethodHash)
-DEF_CLR_API(findNameOfToken)
DEF_CLR_API(getSystemVAmd64PassStructInRegisterDescriptor)
DEF_CLR_API(getLoongArch64PassStructInRegisterFlags)
DEF_CLR_API(getRISCV64PassStructInRegisterFlags)
DEF_CLR_API(getThreadTLSIndex)
-DEF_CLR_API(getInlinedCallFrameVptr)
DEF_CLR_API(getAddrOfCaptureThreadGlobal)
DEF_CLR_API(getHelperFtn)
DEF_CLR_API(getFunctionEntryPoint)
@@ -150,8 +141,6 @@ DEF_CLR_API(canGetCookieForPInvokeCalliSig)
DEF_CLR_API(getJustMyCodeHandle)
DEF_CLR_API(GetProfilingHandle)
DEF_CLR_API(getCallInfo)
-DEF_CLR_API(canAccessFamily)
-DEF_CLR_API(isRIDClassDomainID)
DEF_CLR_API(getClassDomainID)
DEF_CLR_API(getStaticFieldContent)
DEF_CLR_API(getObjectContent)
diff --git a/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp b/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp
index da369eda01a37..6d08b04259688 100644
--- a/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp
+++ b/src/coreclr/jit/ICorJitInfo_wrapper_generated.hpp
@@ -132,15 +132,6 @@ CORINFO_CLASS_HANDLE WrapICorJitInfo::getMethodClass(
return temp;
}
-CORINFO_MODULE_HANDLE WrapICorJitInfo::getMethodModule(
- CORINFO_METHOD_HANDLE method)
-{
- API_ENTER(getMethodModule);
- CORINFO_MODULE_HANDLE temp = wrapHnd->getMethodModule(method);
- API_LEAVE(getMethodModule);
- return temp;
-}
-
void WrapICorJitInfo::getMethodVTableOffset(
CORINFO_METHOD_HANDLE method,
unsigned* offsetOfIndirection,
@@ -238,19 +229,6 @@ bool WrapICorJitInfo::satisfiesMethodConstraints(
return temp;
}
-bool WrapICorJitInfo::isCompatibleDelegate(
- CORINFO_CLASS_HANDLE objCls,
- CORINFO_CLASS_HANDLE methodParentCls,
- CORINFO_METHOD_HANDLE method,
- CORINFO_CLASS_HANDLE delegateCls,
- bool* pfIsOpenDelegate)
-{
- API_ENTER(isCompatibleDelegate);
- bool temp = wrapHnd->isCompatibleDelegate(objCls, methodParentCls, method, delegateCls, pfIsOpenDelegate);
- API_LEAVE(isCompatibleDelegate);
- return temp;
-}
-
void WrapICorJitInfo::methodMustBeLoadedBeforeCodeIsRun(
CORINFO_METHOD_HANDLE method)
{
@@ -302,15 +280,6 @@ void WrapICorJitInfo::resolveToken(
API_LEAVE(resolveToken);
}
-bool WrapICorJitInfo::tryResolveToken(
- CORINFO_RESOLVED_TOKEN* pResolvedToken)
-{
- API_ENTER(tryResolveToken);
- bool temp = wrapHnd->tryResolveToken(pResolvedToken);
- API_LEAVE(tryResolveToken);
- return temp;
-}
-
void WrapICorJitInfo::findSig(
CORINFO_MODULE_HANDLE module,
unsigned sigTOK,
@@ -342,26 +311,6 @@ CORINFO_CLASS_HANDLE WrapICorJitInfo::getTokenTypeAsHandle(
return temp;
}
-bool WrapICorJitInfo::isValidToken(
- CORINFO_MODULE_HANDLE module,
- unsigned metaTOK)
-{
- API_ENTER(isValidToken);
- bool temp = wrapHnd->isValidToken(module, metaTOK);
- API_LEAVE(isValidToken);
- return temp;
-}
-
-bool WrapICorJitInfo::isValidStringRef(
- CORINFO_MODULE_HANDLE module,
- unsigned metaTOK)
-{
- API_ENTER(isValidStringRef);
- bool temp = wrapHnd->isValidStringRef(module, metaTOK);
- API_LEAVE(isValidStringRef);
- return temp;
-}
-
int WrapICorJitInfo::getStringLiteral(
CORINFO_MODULE_HANDLE module,
unsigned metaTOK,
@@ -813,16 +762,6 @@ TypeCompareState WrapICorJitInfo::compareTypesForEquality(
return temp;
}
-CORINFO_CLASS_HANDLE WrapICorJitInfo::mergeClasses(
- CORINFO_CLASS_HANDLE cls1,
- CORINFO_CLASS_HANDLE cls2)
-{
- API_ENTER(mergeClasses);
- CORINFO_CLASS_HANDLE temp = wrapHnd->mergeClasses(cls1, cls2);
- API_LEAVE(mergeClasses);
- return temp;
-}
-
bool WrapICorJitInfo::isMoreSpecificType(
CORINFO_CLASS_HANDLE cls1,
CORINFO_CLASS_HANDLE cls2)
@@ -862,15 +801,6 @@ CorInfoType WrapICorJitInfo::getChildType(
return temp;
}
-bool WrapICorJitInfo::satisfiesClassConstraints(
- CORINFO_CLASS_HANDLE cls)
-{
- API_ENTER(satisfiesClassConstraints);
- bool temp = wrapHnd->satisfiesClassConstraints(cls);
- API_LEAVE(satisfiesClassConstraints);
- return temp;
-}
-
bool WrapICorJitInfo::isSDArray(
CORINFO_CLASS_HANDLE cls)
{
@@ -1206,18 +1136,6 @@ unsigned WrapICorJitInfo::getMethodHash(
return temp;
}
-size_t WrapICorJitInfo::findNameOfToken(
- CORINFO_MODULE_HANDLE moduleHandle,
- mdToken token,
- char* szFQName,
- size_t FQNameCapacity)
-{
- API_ENTER(findNameOfToken);
- size_t temp = wrapHnd->findNameOfToken(moduleHandle, token, szFQName, FQNameCapacity);
- API_LEAVE(findNameOfToken);
- return temp;
-}
-
bool WrapICorJitInfo::getSystemVAmd64PassStructInRegisterDescriptor(
CORINFO_CLASS_HANDLE structHnd,
SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr)
@@ -1255,15 +1173,6 @@ uint32_t WrapICorJitInfo::getThreadTLSIndex(
return temp;
}
-const void* WrapICorJitInfo::getInlinedCallFrameVptr(
- void** ppIndirection)
-{
- API_ENTER(getInlinedCallFrameVptr);
- const void* temp = wrapHnd->getInlinedCallFrameVptr(ppIndirection);
- API_LEAVE(getInlinedCallFrameVptr);
- return temp;
-}
-
int32_t* WrapICorJitInfo::getAddrOfCaptureThreadGlobal(
void** ppIndirection)
{
@@ -1441,25 +1350,6 @@ void WrapICorJitInfo::getCallInfo(
API_LEAVE(getCallInfo);
}
-bool WrapICorJitInfo::canAccessFamily(
- CORINFO_METHOD_HANDLE hCaller,
- CORINFO_CLASS_HANDLE hInstanceType)
-{
- API_ENTER(canAccessFamily);
- bool temp = wrapHnd->canAccessFamily(hCaller, hInstanceType);
- API_LEAVE(canAccessFamily);
- return temp;
-}
-
-bool WrapICorJitInfo::isRIDClassDomainID(
- CORINFO_CLASS_HANDLE cls)
-{
- API_ENTER(isRIDClassDomainID);
- bool temp = wrapHnd->isRIDClassDomainID(cls);
- API_LEAVE(isRIDClassDomainID);
- return temp;
-}
-
unsigned WrapICorJitInfo::getClassDomainID(
CORINFO_CLASS_HANDLE cls,
void** ppIndirection)
@@ -1742,11 +1632,10 @@ void WrapICorJitInfo::recordRelocation(
void* locationRW,
void* target,
uint16_t fRelocType,
- uint16_t slotNum,
int32_t addlDelta)
{
API_ENTER(recordRelocation);
- wrapHnd->recordRelocation(location, locationRW, target, fRelocType, slotNum, addlDelta);
+ wrapHnd->recordRelocation(location, locationRW, target, fRelocType, addlDelta);
API_LEAVE(recordRelocation);
}
diff --git a/src/coreclr/jit/block.cpp b/src/coreclr/jit/block.cpp
index 5284e747092bd..01c1e52b99613 100644
--- a/src/coreclr/jit/block.cpp
+++ b/src/coreclr/jit/block.cpp
@@ -846,7 +846,7 @@ BasicBlock* BasicBlock::GetUniquePred(Compiler* compiler) const
//
// Return Value:
// The unique successor of a block, or nullptr if there is no unique successor.
-
+//
BasicBlock* BasicBlock::GetUniqueSucc() const
{
if (bbJumpKind == BBJ_ALWAYS)
diff --git a/src/coreclr/jit/compiler.h b/src/coreclr/jit/compiler.h
index b25f233ff1174..597b88709019b 100644
--- a/src/coreclr/jit/compiler.h
+++ b/src/coreclr/jit/compiler.h
@@ -6137,6 +6137,7 @@ class Compiler
PhaseStatus fgForwardSub();
bool fgForwardSubBlock(BasicBlock* block);
bool fgForwardSubStatement(Statement* statement);
+ bool fgForwardSubHasStoreInterference(Statement* defStmt, Statement* nextStmt, GenTree* nextStmtUse);
void fgForwardSubUpdateLiveness(GenTree* newSubListFirst, GenTree* newSubListLast);
// The given local variable, required to be a struct variable, is being assigned via
@@ -8072,9 +8073,7 @@ class Compiler
WORD eeGetRelocTypeHint(void* target);
- // ICorStaticInfo wrapper functions
-
- bool eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken);
+    // ICorStaticInfo wrapper functions
#if defined(UNIX_AMD64_ABI)
#ifdef DEBUG
@@ -9758,7 +9757,6 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
STRESS_MODE(CATCH_ARG) /* Will spill catch arg */ \
STRESS_MODE(UNSAFE_BUFFER_CHECKS) \
STRESS_MODE(NULL_OBJECT_CHECK) \
- STRESS_MODE(PINVOKE_RESTORE_ESP) \
STRESS_MODE(RANDOM_INLINE) \
STRESS_MODE(SWITCH_CMP_BR_EXPANSION) \
STRESS_MODE(GENERIC_VARN) \
diff --git a/src/coreclr/jit/ee_il_dll.cpp b/src/coreclr/jit/ee_il_dll.cpp
index 5d7c5b9b9c4eb..bbec124b4b1ac 100644
--- a/src/coreclr/jit/ee_il_dll.cpp
+++ b/src/coreclr/jit/ee_il_dll.cpp
@@ -1361,11 +1361,6 @@ void Compiler::eeGetSystemVAmd64PassStructInRegisterDescriptor(
#endif // UNIX_AMD64_ABI
-bool Compiler::eeTryResolveToken(CORINFO_RESOLVED_TOKEN* resolvedToken)
-{
- return info.compCompHnd->tryResolveToken(resolvedToken);
-}
-
bool Compiler::eeRunWithErrorTrapImp(void (*function)(void*), void* param)
{
return info.compCompHnd->runWithErrorTrap(function, param);
diff --git a/src/coreclr/jit/emit.cpp b/src/coreclr/jit/emit.cpp
index 11407c9cabb1d..70a9e5ebb054f 100644
--- a/src/coreclr/jit/emit.cpp
+++ b/src/coreclr/jit/emit.cpp
@@ -10084,8 +10084,7 @@ void emitter::emitRecordRelocation(void* location, /* IN */
// late disassembly; maybe we'll need it?
if (emitComp->info.compMatchedVM)
{
- // slotNum is unused on all supported platforms.
- emitCmpHandle->recordRelocation(location, locationRW, target, fRelocType, /* slotNum */ 0, addlDelta);
+ emitCmpHandle->recordRelocation(location, locationRW, target, fRelocType, addlDelta);
}
#if defined(LATE_DISASM)
codeGen->getDisAssembler().disRecordRelocation((size_t)location, (size_t)target);
diff --git a/src/coreclr/jit/emitarm64.cpp b/src/coreclr/jit/emitarm64.cpp
index 8cb8d54513b31..50fd567e8a9fc 100644
--- a/src/coreclr/jit/emitarm64.cpp
+++ b/src/coreclr/jit/emitarm64.cpp
@@ -10222,10 +10222,10 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
assert(fmt == IF_BI_0A);
assert((distVal & 1) == 0);
- code_t code = emitInsCode(ins, fmt);
- const bool recordRelocation = emitComp->opts.compReloc && emitJumpCrossHotColdBoundary(srcOffs, dstOffs);
+ code_t code = emitInsCode(ins, fmt);
+ const bool doRecordRelocation = emitComp->opts.compReloc && emitJumpCrossHotColdBoundary(srcOffs, dstOffs);
- if (recordRelocation)
+ if (doRecordRelocation)
{
// dst isn't an actual final target location, just some intermediate
// location. Thus we cannot make any guarantees about distVal (not
@@ -10246,7 +10246,7 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
const unsigned instrSize = emitOutput_Instr(dst, code);
- if (recordRelocation)
+ if (doRecordRelocation)
{
assert(id->idjKeepLong);
if (emitComp->info.compMatchedVM)
diff --git a/src/coreclr/jit/forwardsub.cpp b/src/coreclr/jit/forwardsub.cpp
index eea19cabfdc8e..1f05ff1929e87 100644
--- a/src/coreclr/jit/forwardsub.cpp
+++ b/src/coreclr/jit/forwardsub.cpp
@@ -652,6 +652,14 @@ bool Compiler::fgForwardSubStatement(Statement* stmt)
//
// if the next tree can't change the value of fwdSubNode or be impacted by fwdSubNode effects
//
+ if (((fsv.GetFlags() & GTF_ASG) != 0) && fgForwardSubHasStoreInterference(stmt, nextStmt, fsv.GetNode()))
+ {
+ // We execute a store before the substitution local; that
+ // store could interfere with some of the locals in the source of
+ // the candidate def.
+ JITDUMP(" cannot reorder with potential interfering store\n");
+ return false;
+ }
if (((fwdSubNode->gtFlags & GTF_CALL) != 0) && ((fsv.GetFlags() & GTF_ALL_EFFECT) != 0))
{
JITDUMP(" cannot reorder call with any side effect\n");
@@ -871,6 +879,69 @@ bool Compiler::fgForwardSubStatement(Statement* stmt)
return true;
}
+//------------------------------------------------------------------------
+// fgForwardSubHasStoreInterference: Check if a forward sub candidate
+// interferes with stores in the statement it may be substituted into.
+//
+// Arguments:
+// defStmt - The statement with the def
+// nextStmt - The statement that is being substituted into
+// nextStmtUse - Use of the local being substituted in the next statement
+//
+// Returns:
+// True if there is interference.
+//
+// Remarks:
+// We expect the caller to have checked for GTF_ASG before doing the precise
+// check here.
+//
+bool Compiler::fgForwardSubHasStoreInterference(Statement* defStmt, Statement* nextStmt, GenTree* nextStmtUse)
+{
+ assert(defStmt->GetRootNode()->OperIsLocalStore());
+ assert(nextStmtUse->OperIsLocalRead());
+
+ GenTreeLclVarCommon* defNode = defStmt->GetRootNode()->AsLclVarCommon();
+
+ for (GenTreeLclVarCommon* defStmtLcl : defStmt->LocalsTreeList())
+ {
+ if (defStmtLcl == defNode)
+ {
+ break;
+ }
+
+ unsigned defStmtLclNum = defStmtLcl->GetLclNum();
+ LclVarDsc* defStmtLclDsc = lvaGetDesc(defStmtLclNum);
+ unsigned defStmtParentLclNum = BAD_VAR_NUM;
+ if (defStmtLclDsc->lvIsStructField)
+ {
+ defStmtParentLclNum = defStmtLclDsc->lvParentLcl;
+ }
+
+ for (GenTreeLclVarCommon* useStmtLcl : nextStmt->LocalsTreeList())
+ {
+ if (useStmtLcl == nextStmtUse)
+ {
+ break;
+ }
+
+ if (!useStmtLcl->OperIsLocalStore())
+ {
+ continue;
+ }
+
+ // If the next statement has a store earlier than the use and that
+ // store affects a local on the RHS of the forward sub candidate,
+ // then we have interference.
+ if ((useStmtLcl->GetLclNum() == defStmtLclNum) || (useStmtLcl->GetLclNum() == defStmtParentLclNum))
+ {
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
//------------------------------------------------------------------------
// fgForwardSubUpdateLiveness: correct liveness after performing a forward
// substitution that added a new sub list of locals in a statement.
diff --git a/src/coreclr/jit/gentree.cpp b/src/coreclr/jit/gentree.cpp
index f6089645f8c41..c568deaa7467f 100644
--- a/src/coreclr/jit/gentree.cpp
+++ b/src/coreclr/jit/gentree.cpp
@@ -18194,9 +18194,7 @@ CORINFO_CLASS_HANDLE Compiler::gtGetHelperCallClassHandle(GenTreeCall* call, boo
// for the result, unless it is an interface type.
//
// TODO-CQ: when we have default interface methods then
- // this might not be the best assumption. We could also
- // explore calling something like mergeClasses to identify
- // the more specific class. A similar issue arises when
+ // this might not be the best assumption. A similar issue arises when
// typing the temp in impCastClassOrIsInstToTree, when we
// expand the cast inline.
if (castHnd != nullptr)
diff --git a/src/coreclr/jit/jitee.h b/src/coreclr/jit/jitee.h
index ac51df952ab42..27963ac356efb 100644
--- a/src/coreclr/jit/jitee.h
+++ b/src/coreclr/jit/jitee.h
@@ -1,109 +1,56 @@
// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
-// This class wraps the CORJIT_FLAGS type in the JIT-EE interface (in corjit.h) such that the JIT can
-// build with either the old flags (COR_JIT_EE_VERSION <= 460) or the new flags (COR_JIT_EE_VERSION > 460).
-// It actually is exactly the same as the new definition, and must be kept up-to-date with the new definition.
-// When built against an old JIT-EE interface, the old flags are converted into this structure.
+// This class wraps the CORJIT_FLAGS type in the JIT-EE interface (in corjit.h).
+// If this changes, also change spmidumphelper.cpp.
class JitFlags
{
public:
// clang-format off
enum JitFlag
{
- JIT_FLAG_SPEED_OPT = 0,
- JIT_FLAG_SIZE_OPT = 1,
+ JIT_FLAG_SPEED_OPT = 0, // optimize for speed
+ JIT_FLAG_SIZE_OPT = 1, // optimize for code size
JIT_FLAG_DEBUG_CODE = 2, // generate "debuggable" code (no code-mangling optimizations)
JIT_FLAG_DEBUG_EnC = 3, // We are in Edit-n-Continue mode
JIT_FLAG_DEBUG_INFO = 4, // generate line and local-var info
JIT_FLAG_MIN_OPT = 5, // disable all jit optimizations (not necessarily debuggable code)
JIT_FLAG_ENABLE_CFG = 6, // generate CFG enabled code
- JIT_FLAG_MCJIT_BACKGROUND = 7, // Calling from multicore JIT background thread, do not call JitComplete
-
- #if defined(TARGET_X86)
- JIT_FLAG_PINVOKE_RESTORE_ESP = 8, // Restore ESP after returning from inlined PInvoke
- #else // !defined(TARGET_X86)
- JIT_FLAG_UNUSED2 = 8,
- #endif // !defined(TARGET_X86)
-
- JIT_FLAG_UNUSED3 = 9,
- JIT_FLAG_UNUSED4 = 10,
- JIT_FLAG_UNUSED5 = 11,
- JIT_FLAG_UNUSED6 = 12,
-
- JIT_FLAG_OSR = 13, // Generate alternate version for On Stack Replacement
-
- JIT_FLAG_ALT_JIT = 14, // JIT should consider itself an ALT_JIT
- JIT_FLAG_FROZEN_ALLOC_ALLOWED = 15, // JIT is allowed to use *_MAYBEFROZEN allocators
- JIT_FLAG_UNUSED9 = 16,
-
- #if defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64)
- JIT_FLAG_FEATURE_SIMD = 17,
- #else
- JIT_FLAG_UNUSED10 = 17,
- #endif // !(defined(TARGET_X86) || defined(TARGET_AMD64) || defined(TARGET_ARM64))
-
- JIT_FLAG_MAKEFINALCODE = 18, // Use the final code generator, i.e., not the interpreter.
- JIT_FLAG_READYTORUN = 19, // Use version-resilient code generation
- JIT_FLAG_PROF_ENTERLEAVE = 20, // Instrument prologues/epilogues
- JIT_FLAG_UNUSED11 = 21,
- JIT_FLAG_PROF_NO_PINVOKE_INLINE = 22, // Disables PInvoke inlining
- JIT_FLAG_UNUSED12 = 23,
- JIT_FLAG_PREJIT = 24, // jit or prejit is the execution engine.
- JIT_FLAG_RELOC = 25, // Generate relocatable code
- JIT_FLAG_IMPORT_ONLY = 26, // Only import the function
- JIT_FLAG_IL_STUB = 27, // method is an IL stub
- JIT_FLAG_PROCSPLIT = 28, // JIT should separate code into hot and cold sections
- JIT_FLAG_BBINSTR = 29, // Collect basic block profile information
- JIT_FLAG_BBOPT = 30, // Optimize method based on profile information
- JIT_FLAG_FRAMED = 31, // All methods have an EBP frame
- JIT_FLAG_BBINSTR_IF_LOOPS = 32, // JIT must instrument current method if it has loops
- JIT_FLAG_PUBLISH_SECRET_PARAM = 33, // JIT must place stub secret param into local 0. (used by IL stubs)
- JIT_FLAG_UNUSED13 = 34,
- JIT_FLAG_UNUSED14 = 35,
- JIT_FLAG_USE_PINVOKE_HELPERS = 36, // The JIT should use the PINVOKE_{BEGIN,END} helpers instead of emitting inline transitions
- JIT_FLAG_REVERSE_PINVOKE = 37, // The JIT should insert REVERSE_PINVOKE_{ENTER,EXIT} helpers into method prolog/epilog
- JIT_FLAG_TRACK_TRANSITIONS = 38, // The JIT should insert the helper variants that track transitions.
- JIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible
- JIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code
+ JIT_FLAG_OSR = 7, // Generate alternate version for On Stack Replacement
+ JIT_FLAG_ALT_JIT = 8, // JIT should consider itself an ALT_JIT
+ JIT_FLAG_FROZEN_ALLOC_ALLOWED = 9, // JIT is allowed to use *_MAYBEFROZEN allocators
+ JIT_FLAG_MAKEFINALCODE = 10, // Use the final code generator, i.e., not the interpreter.
+ JIT_FLAG_READYTORUN = 11, // Use version-resilient code generation
+ JIT_FLAG_PROF_ENTERLEAVE = 12, // Instrument prologues/epilogues
+ JIT_FLAG_PROF_NO_PINVOKE_INLINE = 13, // Disables PInvoke inlining
+ JIT_FLAG_PREJIT = 14, // prejit is the execution engine.
+ JIT_FLAG_RELOC = 15, // Generate relocatable code
+ JIT_FLAG_IL_STUB = 16, // method is an IL stub
+ JIT_FLAG_PROCSPLIT = 17, // JIT should separate code into hot and cold sections
+ JIT_FLAG_BBINSTR = 18, // Collect basic block profile information
+ JIT_FLAG_BBINSTR_IF_LOOPS = 19, // JIT must instrument current method if it has loops
+ JIT_FLAG_BBOPT = 20, // Optimize method based on profile information
+ JIT_FLAG_FRAMED = 21, // All methods have an EBP frame
+ JIT_FLAG_PUBLISH_SECRET_PARAM = 22, // JIT must place stub secret param into local 0. (used by IL stubs)
+ JIT_FLAG_USE_PINVOKE_HELPERS = 23, // The JIT should use the PINVOKE_{BEGIN,END} helpers instead of emitting inline transitions
+ JIT_FLAG_REVERSE_PINVOKE = 24, // The JIT should insert REVERSE_PINVOKE_{ENTER,EXIT} helpers into method prolog/epilog
+ JIT_FLAG_TRACK_TRANSITIONS = 25, // The JIT should insert the helper variants that track transitions.
+ JIT_FLAG_TIER0 = 26, // This is the initial tier for tiered compilation which should generate code as quickly as possible
+ JIT_FLAG_TIER1 = 27, // This is the final tier (for now) for tiered compilation which should generate high quality code
+ JIT_FLAG_NO_INLINING = 28, // JIT should not inline any called method into this method
#if defined(TARGET_ARM)
- JIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
-#else // !defined(TARGET_ARM)
- JIT_FLAG_UNUSED15 = 41,
-#endif // !defined(TARGET_ARM)
-
- JIT_FLAG_NO_INLINING = 42, // JIT should not inline any called method into this method
-
-#if defined(TARGET_ARM)
- JIT_FLAG_SOFTFP_ABI = 43, // On ARM should enable armel calling convention
-#elif defined(TARGET_XARCH)
- JIT_FLAG_VECTOR512_THROTTLING = 43, // On Xarch, 512-bit vector usage may incur CPU frequency throttling
-#else
- JIT_FLAG_UNUSED16 = 43,
+ JIT_FLAG_RELATIVE_CODE_RELOCS = 29, // JIT should generate PC-relative address computations instead of EE relocation records
+ JIT_FLAG_SOFTFP_ABI = 30, // Enable armel calling convention
#endif
- JIT_FLAG_UNUSED17 = 44,
- JIT_FLAG_UNUSED18 = 45,
- JIT_FLAG_UNUSED19 = 46,
- JIT_FLAG_UNUSED20 = 47,
- JIT_FLAG_UNUSED21 = 48,
- JIT_FLAG_UNUSED22 = 49,
- JIT_FLAG_UNUSED23 = 50,
- JIT_FLAG_UNUSED24 = 51,
- JIT_FLAG_UNUSED25 = 52,
- JIT_FLAG_UNUSED26 = 53,
- JIT_FLAG_UNUSED27 = 54,
- JIT_FLAG_UNUSED28 = 55,
- JIT_FLAG_UNUSED29 = 56,
- JIT_FLAG_UNUSED30 = 57,
- JIT_FLAG_UNUSED31 = 58,
- JIT_FLAG_UNUSED32 = 59,
- JIT_FLAG_UNUSED33 = 60,
- JIT_FLAG_UNUSED34 = 61,
- JIT_FLAG_UNUSED35 = 62,
- JIT_FLAG_UNUSED36 = 63
+#if defined(TARGET_XARCH)
+ JIT_FLAG_VECTOR512_THROTTLING = 31, // On Xarch, 512-bit vector usage may incur CPU frequency throttling
+#endif
+ // Note: the mcs tool uses the currently unused upper flags bits when outputting SuperPMI MC file flags.
+ // See EXTRA_JIT_FLAGS and spmidumphelper.cpp. Currently, these are bits 56 through 63. If they overlap,
+ // something needs to change.
};
// clang-format on
@@ -171,12 +118,7 @@ class JitFlags
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_DEBUG_INFO, JIT_FLAG_DEBUG_INFO);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_MIN_OPT, JIT_FLAG_MIN_OPT);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_ENABLE_CFG, JIT_FLAG_ENABLE_CFG);
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_MCJIT_BACKGROUND, JIT_FLAG_MCJIT_BACKGROUND);
-
-#if defined(TARGET_X86)
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_PINVOKE_RESTORE_ESP, JIT_FLAG_PINVOKE_RESTORE_ESP);
-#endif
-
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_OSR, JIT_FLAG_OSR);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_ALT_JIT, JIT_FLAG_ALT_JIT);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_FROZEN_ALLOC_ALLOWED, JIT_FLAG_FROZEN_ALLOC_ALLOWED);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_MAKEFINALCODE, JIT_FLAG_MAKEFINALCODE);
@@ -194,17 +136,18 @@ class JitFlags
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_PUBLISH_SECRET_PARAM, JIT_FLAG_PUBLISH_SECRET_PARAM);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_USE_PINVOKE_HELPERS, JIT_FLAG_USE_PINVOKE_HELPERS);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_REVERSE_PINVOKE, JIT_FLAG_REVERSE_PINVOKE);
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TRACK_TRANSITIONS, JIT_FLAG_TRACK_TRANSITIONS);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER0, JIT_FLAG_TIER0);
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER1, JIT_FLAG_TIER1);
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING, JIT_FLAG_NO_INLINING);
#if defined(TARGET_ARM)
FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS, JIT_FLAG_RELATIVE_CODE_RELOCS);
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_SOFTFP_ABI, JIT_FLAG_SOFTFP_ABI);
#endif // TARGET_ARM
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_NO_INLINING, JIT_FLAG_NO_INLINING);
-
-#if defined(TARGET_ARM)
- FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_SOFTFP_ABI, JIT_FLAG_SOFTFP_ABI);
+#if defined(TARGET_X86) || defined(TARGET_AMD64)
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_VECTOR512_THROTTLING, JIT_FLAG_VECTOR512_THROTTLING);
#endif // TARGET_ARM
#undef FLAGS_EQUAL
diff --git a/src/coreclr/jit/lowerarmarch.cpp b/src/coreclr/jit/lowerarmarch.cpp
index 64826c428b77f..25d07c53e0929 100644
--- a/src/coreclr/jit/lowerarmarch.cpp
+++ b/src/coreclr/jit/lowerarmarch.cpp
@@ -2579,7 +2579,7 @@ void Lowering::TryLowerCselToCinvOrCneg(GenTreeOp* select, GenTree* cond)
assert(trueVal->OperIs(GT_NOT, GT_NEG) || falseVal->OperIs(GT_NOT, GT_NEG));
- if (trueVal->OperIs(GT_NOT) || trueVal->OperIs(GT_NEG))
+ if ((isCneg && trueVal->OperIs(GT_NEG)) || (!isCneg && trueVal->OperIs(GT_NOT)))
{
shouldReverseCondition = true;
invertedOrNegatedVal = trueVal->gtGetOp1();
diff --git a/src/coreclr/jit/promotion.cpp b/src/coreclr/jit/promotion.cpp
index 277f5fb9a29a7..ee2ba01e1bb0a 100644
--- a/src/coreclr/jit/promotion.cpp
+++ b/src/coreclr/jit/promotion.cpp
@@ -202,13 +202,28 @@ bool AggregateInfo::OverlappingReplacements(unsigned offset,
return true;
}
+struct PrimitiveAccess
+{
+ weight_t CountWtd = 0;
+ unsigned Offset;
+ var_types AccessType;
+
+ INDEBUG(unsigned Count = 0);
+
+ PrimitiveAccess(unsigned offset, var_types accessType) : Offset(offset), AccessType(accessType)
+ {
+ }
+};
+
// Tracks all the accesses into one particular struct local.
class LocalUses
{
- jitstd::vector m_accesses;
+ jitstd::vector m_accesses;
+ jitstd::vector m_inducedAccesses;
public:
- LocalUses(Compiler* comp) : m_accesses(comp->getAllocator(CMK_Promotion))
+ LocalUses(Compiler* comp)
+ : m_accesses(comp->getAllocator(CMK_Promotion)), m_inducedAccesses(comp->getAllocator(CMK_Promotion))
{
}
@@ -307,6 +322,56 @@ class LocalUses
#endif
}
+ //------------------------------------------------------------------------
+ // RecordInducedAccess:
+ // Record an induced access into this local with the specified offset and access type.
+ //
+ // Parameters:
+ // offs - The offset being accessed
+ // accessType - The type of the access
+ // weight - Weight of the block containing the access
+ //
+ // Remarks:
+ // Induced accesses are accesses that are induced by physical promotion
+ // due to assignment decomposition. They are always of primitive type.
+ //
+ void RecordInducedAccess(unsigned offs, var_types accessType, weight_t weight)
+ {
+ PrimitiveAccess* access = nullptr;
+
+ size_t index = 0;
+ if (m_inducedAccesses.size() > 0)
+ {
+ index = Promotion::BinarySearch(m_inducedAccesses, offs);
+ if ((ssize_t)index >= 0)
+ {
+ do
+ {
+ PrimitiveAccess& candidateAccess = m_inducedAccesses[index];
+ if (candidateAccess.AccessType == accessType)
+ {
+ access = &candidateAccess;
+ break;
+ }
+
+ index++;
+ } while (index < m_inducedAccesses.size() && m_inducedAccesses[index].Offset == offs);
+ }
+ else
+ {
+ index = ~index;
+ }
+ }
+
+ if (access == nullptr)
+ {
+ access = &*m_inducedAccesses.insert(m_inducedAccesses.begin() + index, PrimitiveAccess(offs, accessType));
+ }
+
+ access->CountWtd += weight;
+ INDEBUG(access->Count++);
+ }
+
//------------------------------------------------------------------------
// PickPromotions:
// Pick specific replacements to make for this struct local after a set
@@ -336,33 +401,121 @@ class LocalUses
continue;
}
- if (!EvaluateReplacement(comp, lclNum, access))
+ if (!EvaluateReplacement(comp, lclNum, access, 0))
{
continue;
}
-#ifdef DEBUG
- char buf[32];
- sprintf_s(buf, sizeof(buf), "V%02u.[%03u..%03u)", lclNum, access.Offset,
- access.Offset + genTypeSize(access.AccessType));
- size_t len = strlen(buf) + 1;
- char* bufp = new (comp, CMK_DebugOnly) char[len];
- strcpy_s(bufp, len, buf);
-#endif
- unsigned newLcl = comp->lvaGrabTemp(false DEBUGARG(bufp));
- LclVarDsc* dsc = comp->lvaGetDesc(newLcl);
- dsc->lvType = access.AccessType;
+ if (*aggregateInfo == nullptr)
+ {
+ *aggregateInfo = new (comp, CMK_Promotion) AggregateInfo(comp->getAllocator(CMK_Promotion), lclNum);
+ }
+
+ (*aggregateInfo)->Replacements.push_back(Replacement(access.Offset, access.AccessType));
+ }
+
+ JITDUMP("\n");
+ }
+
+ //------------------------------------------------------------------------
+ // PickInducedPromotions:
+ // Pick additional promotions to make based on the fact that some
+ // accesses will be induced by assignment decomposition.
+ //
+ // Parameters:
+ // comp - Compiler instance
+ // lclNum - Local num for this struct local
+ // aggregateInfo - [out] Pointer to aggregate info to create and insert replacements into.
+ //
+ bool PickInducedPromotions(Compiler* comp, unsigned lclNum, AggregateInfo** aggregateInfo)
+ {
+ if (m_inducedAccesses.size() <= 0)
+ {
+ return false;
+ }
+
+ bool any = false;
+ JITDUMP("Picking induced promotions for V%02u\n", lclNum);
+ for (PrimitiveAccess& inducedAccess : m_inducedAccesses)
+ {
+ bool overlapsOtherInducedAccess = false;
+ for (PrimitiveAccess& otherInducedAccess : m_inducedAccesses)
+ {
+ if (&otherInducedAccess == &inducedAccess)
+ {
+ continue;
+ }
+
+ if (inducedAccess.Offset + genTypeSize(inducedAccess.AccessType) <= otherInducedAccess.Offset)
+ {
+ break;
+ }
+
+ if (otherInducedAccess.Offset + genTypeSize(otherInducedAccess.AccessType) <= inducedAccess.Offset)
+ {
+ continue;
+ }
+
+ overlapsOtherInducedAccess = true;
+ break;
+ }
+
+ if (overlapsOtherInducedAccess)
+ {
+ continue;
+ }
+
+ Access* access = FindAccess(inducedAccess.Offset, inducedAccess.AccessType);
+
+ if (access == nullptr)
+ {
+ Access fakeAccess(inducedAccess.Offset, inducedAccess.AccessType, nullptr);
+ if (!EvaluateReplacement(comp, lclNum, fakeAccess, inducedAccess.CountWtd))
+ {
+ continue;
+ }
+ }
+ else
+ {
+ if (!EvaluateReplacement(comp, lclNum, *access, inducedAccess.CountWtd))
+ {
+ continue;
+ }
+ }
if (*aggregateInfo == nullptr)
{
*aggregateInfo = new (comp, CMK_Promotion) AggregateInfo(comp->getAllocator(CMK_Promotion), lclNum);
}
+ size_t insertionIndex;
+ if ((*aggregateInfo)->Replacements.size() > 0)
+ {
+#ifdef DEBUG
+ Replacement* overlapRep;
+ assert(!(*aggregateInfo)
+ ->OverlappingReplacements(inducedAccess.Offset, genTypeSize(inducedAccess.AccessType),
+ &overlapRep, nullptr));
+#endif
+
+ insertionIndex =
+ Promotion::BinarySearch((*aggregateInfo)->Replacements,
+ inducedAccess.Offset);
+ assert((ssize_t)insertionIndex < 0);
+ insertionIndex = ~insertionIndex;
+ }
+ else
+ {
+ insertionIndex = 0;
+ }
+
(*aggregateInfo)
- ->Replacements.push_back(Replacement(access.Offset, access.AccessType, newLcl DEBUGARG(bufp)));
+ ->Replacements.insert((*aggregateInfo)->Replacements.begin() + insertionIndex,
+ Replacement(inducedAccess.Offset, inducedAccess.AccessType));
+ any = true;
}
- JITDUMP("\n");
+ return any;
}
//------------------------------------------------------------------------
@@ -370,14 +523,15 @@ class LocalUses
// Evaluate legality and profitability of a single replacement candidate.
//
// Parameters:
- // comp - Compiler instance
- // lclNum - Local num for this struct local
- // access - Access information for the candidate.
+ // comp - Compiler instance
+ // lclNum - Local num for this struct local
+ // access - Access information for the candidate.
+ // inducedCountWtd - Additional weighted count due to induced accesses.
//
// Returns:
// True if we should promote this access and create a replacement; otherwise false.
//
- bool EvaluateReplacement(Compiler* comp, unsigned lclNum, const Access& access)
+ bool EvaluateReplacement(Compiler* comp, unsigned lclNum, const Access& access, weight_t inducedCountWtd)
{
weight_t countOverlappedCallArgWtd = 0;
weight_t countOverlappedRetbufsWtd = 0;
@@ -387,7 +541,9 @@ class LocalUses
for (const Access& otherAccess : m_accesses)
{
if (&otherAccess == &access)
+ {
continue;
+ }
if (!otherAccess.Overlaps(access.Offset, genTypeSize(access.AccessType)))
{
@@ -407,14 +563,14 @@ class LocalUses
weight_t costWithout = 0;
// We cost any normal access (which is a struct load or store) without promotion at 3 cycles.
- costWithout += access.CountWtd * 3;
+ costWithout += (access.CountWtd + inducedCountWtd) * 3;
weight_t costWith = 0;
// For promoted accesses we expect these to turn into reg-reg movs (and in many cases be fully contained in the
// parent).
// We cost these at 0.5 cycles.
- costWith += access.CountWtd * 0.5;
+ costWith += (access.CountWtd + inducedCountWtd) * 0.5;
// Now look at the overlapping struct uses that promotion will make more expensive.
@@ -501,8 +657,24 @@ class LocalUses
return false;
}
+ //------------------------------------------------------------------------
+ // ClearInducedAccesses:
+ // Clear the stored induced access metrics.
+ //
+ void ClearInducedAccesses()
+ {
+ m_inducedAccesses.clear();
+ }
+
#ifdef DEBUG
- void Dump(unsigned lclNum)
+ //------------------------------------------------------------------------
+ // DumpAccesses:
+ // Dump the stored access metrics for a specified local.
+ //
+ // Parameters:
+ // lclNum - The local
+ //
+ void DumpAccesses(unsigned lclNum)
{
if (m_accesses.size() <= 0)
{
@@ -535,15 +707,85 @@ class LocalUses
access.CountReturnsWtd);
}
}
+
+ //------------------------------------------------------------------------
+ // DumpInducedAccesses:
+ // Dump induced accesses for a specified struct local.
+ //
+ // Parameters:
+ // lclNum - The local
+ //
+ void DumpInducedAccesses(unsigned lclNum)
+ {
+ if (m_inducedAccesses.size() <= 0)
+ {
+ return;
+ }
+
+ printf("Induced accesses for V%02u\n", lclNum);
+ for (PrimitiveAccess& access : m_inducedAccesses)
+ {
+ printf(" %s @ %03u\n", varTypeName(access.AccessType), access.Offset);
+ printf(" #: (%u, " FMT_WT ")\n", access.Count, access.CountWtd);
+ }
+ }
#endif
+
+private:
+ //------------------------------------------------------------------------
+ // FindAccess:
+ // Find access metrics information for the specified offset and access type.
+ //
+ // Parameters:
+ // offs - The offset
+ // accessType - Access type
+ //
+ // Returns:
+ // Pointer to a matching access, or nullptr if no match was found.
+ //
+ Access* FindAccess(unsigned offs, var_types accessType)
+ {
+ if (m_accesses.size() <= 0)
+ {
+ return nullptr;
+ }
+
+ size_t index = Promotion::BinarySearch(m_accesses, offs);
+ if ((ssize_t)index < 0)
+ {
+ return nullptr;
+ }
+
+ do
+ {
+ Access& candidateAccess = m_accesses[index];
+ if (candidateAccess.AccessType == accessType)
+ {
+ return &candidateAccess;
+ }
+
+ index++;
+ } while ((index < m_accesses.size()) && (m_accesses[index].Offset == offs));
+
+ return nullptr;
+ }
+};
+
+// Struct used to save all struct stores involving physical promotion candidates.
+// These stores can induce new field accesses as part of assignment decomposition.
+struct CandidateStore
+{
+ GenTreeLclVarCommon* Store;
+ BasicBlock* Block;
};
// Visitor that records information about uses of struct locals.
class LocalsUseVisitor : public GenTreeVisitor
{
- Promotion* m_prom;
- LocalUses** m_uses;
- BasicBlock* m_curBB = nullptr;
+ Promotion* m_prom;
+ LocalUses** m_uses;
+ BasicBlock* m_curBB = nullptr;
+ ArrayStack m_candidateStores;
public:
enum
@@ -552,7 +794,10 @@ class LocalsUseVisitor : public GenTreeVisitor
ComputeStack = true,
};
- LocalsUseVisitor(Promotion* prom) : GenTreeVisitor(prom->m_compiler), m_prom(prom)
+ LocalsUseVisitor(Promotion* prom)
+ : GenTreeVisitor(prom->m_compiler)
+ , m_prom(prom)
+ , m_candidateStores(prom->m_compiler->getAllocator(CMK_Promotion))
{
m_uses = new (prom->m_compiler, CMK_Promotion) LocalUses*[prom->m_compiler->lvaCount]{};
}
@@ -570,21 +815,16 @@ class LocalsUseVisitor : public GenTreeVisitor
}
//------------------------------------------------------------------------
- // GetUsesByLocal:
- // Get the uses information for a specified local.
+ // PreOrderVisit:
+ // Visit a node in preorder and add its use information to the metrics.
//
// Parameters:
- // bb - The current basic block.
+ // use - The use edge
+ // user - The user
//
// Returns:
- // Information about uses, or null if this local has no uses information
- // associated with it.
+ // Visitor result
//
- LocalUses* GetUsesByLocal(unsigned lcl)
- {
- return m_uses[lcl];
- }
-
fgWalkResult PreOrderVisit(GenTree** use, GenTree* user)
{
GenTree* tree = *use;
@@ -629,6 +869,16 @@ class LocalsUseVisitor : public GenTreeVisitor
accessType = lcl->TypeGet();
accessLayout = accessType == TYP_STRUCT ? lcl->GetLayout(m_compiler) : nullptr;
accessFlags = ClassifyLocalAccess(lcl, effectiveUser);
+
+ if (lcl->TypeIs(TYP_STRUCT) &&
+ ((user != nullptr) && user->OperIsLocalStore() && user->Data()->OperIsLocalRead()))
+ {
+ // Make sure we add it only once if both the destination and source are candidates.
+ if ((m_candidateStores.Height() <= 0) || (m_candidateStores.Top().Store != user))
+ {
+ m_candidateStores.Push(CandidateStore{user->AsLclVarCommon(), m_curBB});
+ }
+ }
}
LocalUses* uses = GetOrCreateUses(lcl->GetLclNum());
@@ -640,6 +890,200 @@ class LocalsUseVisitor : public GenTreeVisitor
return fgWalkResult::WALK_CONTINUE;
}
+ //------------------------------------------------------------------------
+ // PickPromotions:
+ // Pick promotions and create aggregate information for each promoted
+ // struct with promotions.
+ //
+ // Parameters:
+ // aggregates - Appropriately sized vector to create aggregate information in.
+ //
+ // Returns:
+ // True if any struct was physically promoted with at least one replacement;
+ // otherwise false.
+ //
+ bool PickPromotions(jitstd::vector& aggregates)
+ {
+ unsigned numLocals = (unsigned)aggregates.size();
+ JITDUMP("Picking promotions\n");
+
+ bool any = false;
+
+ for (unsigned lclNum = 0; lclNum < numLocals; lclNum++)
+ {
+ LocalUses* uses = m_uses[lclNum];
+ if (uses == nullptr)
+ {
+ continue;
+ }
+
+#ifdef DEBUG
+ if (m_compiler->verbose)
+ {
+ uses->DumpAccesses(lclNum);
+ }
+#endif
+
+ uses->PickPromotions(m_compiler, lclNum, &aggregates[lclNum]);
+
+ any |= aggregates[lclNum] != nullptr;
+ }
+
+ if (!any)
+ {
+ return false;
+ }
+
+ if (m_candidateStores.Height() > 0)
+ {
+ // Now look for induced accesses due to assignment decomposition.
+
+ JITDUMP("Looking for induced accesses with %d stores between candidates\n", m_candidateStores.Height());
+ // Expand the set of fields iteratively based on the current picked
+ // set. We put a limit on this fixpoint computation to avoid
+ // pathological cases. From measurements no methods in our own
+ // collections need more than 10 iterations and 99.5% of methods
+ // need fewer than 5 iterations.
+ for (int iters = 0; iters < 10; iters++)
+ {
+ for (int i = 0; i < m_candidateStores.Height(); i++)
+ {
+ const CandidateStore& candidateStore = m_candidateStores.BottomRef(i);
+ GenTreeLclVarCommon* store = candidateStore.Store;
+
+ assert(store->TypeIs(TYP_STRUCT));
+ assert(store->Data()->OperIsLocalRead());
+
+ GenTreeLclVarCommon* src = store->Data()->AsLclVarCommon();
+
+ LclVarDsc* dstDsc = m_compiler->lvaGetDesc(store);
+ LclVarDsc* srcDsc = m_compiler->lvaGetDesc(src);
+
+ assert(Promotion::IsCandidateForPhysicalPromotion(dstDsc) ||
+ Promotion::IsCandidateForPhysicalPromotion(srcDsc));
+
+ if (dstDsc->lvPromoted)
+ {
+ InduceAccessesFromRegularlyPromotedStruct(aggregates, src, store, candidateStore.Block);
+ }
+ else if (srcDsc->lvPromoted)
+ {
+ InduceAccessesFromRegularlyPromotedStruct(aggregates, store, src, candidateStore.Block);
+ }
+ else
+ {
+ if (Promotion::IsCandidateForPhysicalPromotion(dstDsc))
+ {
+ InduceAccessesInCandidate(aggregates, store, src, candidateStore.Block);
+ }
+
+ if (Promotion::IsCandidateForPhysicalPromotion(srcDsc))
+ {
+ InduceAccessesInCandidate(aggregates, src, store, candidateStore.Block);
+ }
+ }
+ }
+
+ bool any = false;
+ for (unsigned lclNum = 0; lclNum < numLocals; lclNum++)
+ {
+ LocalUses* uses = m_uses[lclNum];
+ if (uses == nullptr)
+ {
+ continue;
+ }
+#ifdef DEBUG
+ if (m_compiler->verbose)
+ {
+ uses->DumpInducedAccesses(lclNum);
+ }
+#endif
+
+ any |= uses->PickInducedPromotions(m_compiler, lclNum, &aggregates[lclNum]);
+ }
+
+ if (!any)
+ {
+ break;
+ }
+
+ for (unsigned lclNum = 0; lclNum < numLocals; lclNum++)
+ {
+ if (m_uses[lclNum] != nullptr)
+ {
+ m_uses[lclNum]->ClearInducedAccesses();
+ }
+ }
+ }
+ }
+
+ for (AggregateInfo* agg : aggregates)
+ {
+ if (agg == nullptr)
+ {
+ continue;
+ }
+
+ jitstd::vector& reps = agg->Replacements;
+
+ assert(reps.size() > 0);
+ // Create locals
+ for (Replacement& rep : reps)
+ {
+#ifdef DEBUG
+ char buf[32];
+ sprintf_s(buf, sizeof(buf), "V%02u.[%03u..%03u)", agg->LclNum, rep.Offset,
+ rep.Offset + genTypeSize(rep.AccessType));
+ size_t len = strlen(buf) + 1;
+ char* bufp = new (m_compiler, CMK_DebugOnly) char[len];
+ strcpy_s(bufp, len, buf);
+ rep.Description = bufp;
+#endif
+
+ rep.LclNum = m_compiler->lvaGrabTemp(false DEBUGARG(rep.Description));
+ LclVarDsc* dsc = m_compiler->lvaGetDesc(rep.LclNum);
+ dsc->lvType = rep.AccessType;
+ }
+
+#ifdef DEBUG
+ JITDUMP("V%02u promoted with %d replacements\n", agg->LclNum, (int)reps.size());
+ for (const Replacement& rep : reps)
+ {
+ JITDUMP(" [%03u..%03u) promoted as %s V%02u\n", rep.Offset, rep.Offset + genTypeSize(rep.AccessType),
+ varTypeName(rep.AccessType), rep.LclNum);
+ }
+#endif
+
+ JITDUMP("Computing unpromoted remainder for V%02u\n", agg->LclNum);
+ StructSegments unpromotedParts =
+ Promotion::SignificantSegments(m_compiler, m_compiler->lvaGetDesc(agg->LclNum)->GetLayout());
+ for (Replacement& rep : reps)
+ {
+ unpromotedParts.Subtract(StructSegments::Segment(rep.Offset, rep.Offset + genTypeSize(rep.AccessType)));
+ }
+
+ JITDUMP(" Remainder: ");
+ DBEXEC(m_compiler->verbose, unpromotedParts.Dump());
+ JITDUMP("\n\n");
+
+ StructSegments::Segment unpromotedSegment;
+ if (unpromotedParts.CoveringSegment(&unpromotedSegment))
+ {
+ agg->UnpromotedMin = unpromotedSegment.Start;
+ agg->UnpromotedMax = unpromotedSegment.End;
+ assert(unpromotedSegment.Start < unpromotedSegment.End);
+ }
+ else
+ {
+ // Aggregate is fully promoted, leave UnpromotedMin == UnpromotedMax to indicate this.
+ }
+
+ any = true;
+ }
+
+ return any;
+ }
+
private:
//------------------------------------------------------------------------
// GetOrCreateUses:
@@ -661,6 +1105,112 @@ class LocalsUseVisitor : public GenTreeVisitor
return m_uses[lclNum];
}
+ //------------------------------------------------------------------------
+ // InduceAccessesFromRegularlyPromotedStruct:
+ // Create induced accesses based on the fact that there is an assignment
+ // between a physical promotion candidate and regularly promoted struct.
+ //
+ // Parameters:
+ // aggregates - Aggregate information with current set of replacements
+ // for each struct local.
+ // candidateLcl - The local node for a physical promotion candidate.
+ // regPromLcl - The local node for the regularly promoted struct that
+ // may induce new LCL_FLD nodes in the candidate.
+ // block - The block that the assignment appears in.
+ //
+ void InduceAccessesFromRegularlyPromotedStruct(jitstd::vector& aggregates,
+ GenTreeLclVarCommon* candidateLcl,
+ GenTreeLclVarCommon* regPromLcl,
+ BasicBlock* block)
+ {
+ unsigned regPromOffs = regPromLcl->GetLclOffs();
+ unsigned candidateOffs = candidateLcl->GetLclOffs();
+ unsigned size = regPromLcl->GetLayout(m_compiler)->GetSize();
+
+ LclVarDsc* regPromDsc = m_compiler->lvaGetDesc(regPromLcl);
+ for (unsigned fieldLcl = regPromDsc->lvFieldLclStart, i = 0; i < regPromDsc->lvFieldCnt; fieldLcl++, i++)
+ {
+ LclVarDsc* fieldDsc = m_compiler->lvaGetDesc(fieldLcl);
+ if ((fieldDsc->lvFldOffset >= regPromOffs) &&
+ (fieldDsc->lvFldOffset + genTypeSize(fieldDsc->lvType) <= (regPromOffs + size)))
+ {
+ InduceAccess(aggregates, candidateLcl->GetLclNum(),
+ candidateLcl->GetLclOffs() + (fieldDsc->lvFldOffset - regPromOffs), fieldDsc->lvType,
+ block);
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // InduceAccessesInCandidate:
+ // Create induced accesses based on the fact that a specified candidate
+ // is being assigned from another struct local (the inducer).
+ //
+ // Parameters:
+ // aggregates - Aggregate information with current set of replacements
+ // for each struct local.
+ // candidate - The local node for the physical promotion candidate.
+ // inducer - The local node that may induce new LCL_FLD nodes in the candidate.
+ // block - The block that the assignment appears in.
+ //
+ void InduceAccessesInCandidate(jitstd::vector& aggregates,
+ GenTreeLclVarCommon* candidate,
+ GenTreeLclVarCommon* inducer,
+ BasicBlock* block)
+ {
+ unsigned candOffs = candidate->GetLclOffs();
+ unsigned inducerOffs = inducer->GetLclOffs();
+ unsigned size = candidate->GetLayout(m_compiler)->GetSize();
+
+ AggregateInfo* inducerAgg = aggregates[inducer->GetLclNum()];
+ if (inducerAgg != nullptr)
+ {
+ Replacement* firstRep;
+ Replacement* endRep;
+ if (inducerAgg->OverlappingReplacements(inducerOffs, size, &firstRep, &endRep))
+ {
+ for (Replacement* rep = firstRep; rep < endRep; rep++)
+ {
+ if ((rep->Offset >= inducerOffs) &&
+ (rep->Offset + genTypeSize(rep->AccessType) <= (inducerOffs + size)))
+ {
+ InduceAccess(aggregates, candidate->GetLclNum(), candOffs + (rep->Offset - inducerOffs),
+ rep->AccessType, block);
+ }
+ }
+ }
+ }
+ }
+
+ //------------------------------------------------------------------------
+ // InduceAccess:
+ // Record an induced access in a candidate for physical promotion.
+ //
+ // Parameters:
+ // aggregates - Aggregate information with current set of replacements
+ // for each struct local.
+ // lclNum - Local that has the induced access.
+ // offset - Offset at which the induced access starts.
+ // type - Type of the induced access.
+ // block - The block with the induced access.
+ //
+ void InduceAccess(
+ jitstd::vector& aggregates, unsigned lclNum, unsigned offset, var_types type, BasicBlock* block)
+ {
+ AggregateInfo* agg = aggregates[lclNum];
+ if (agg != nullptr)
+ {
+ Replacement* overlapRep;
+ if (agg->OverlappingReplacements(offset, genTypeSize(type), &overlapRep, nullptr))
+ {
+ return;
+ }
+ }
+
+ LocalUses* uses = GetOrCreateUses(lclNum);
+ uses->RecordInducedAccess(offset, type, block->getBBWeight(m_compiler));
+ }
+
//------------------------------------------------------------------------
// ClassifyLocalAccess:
// Given a local node and its user, classify information about it.
@@ -1139,8 +1689,6 @@ StructSegments Promotion::SignificantSegments(Compiler* compiler,
}
#endif
- // TODO-TP: Cache this per class layout, we call this for every struct
- // operation on a promoted local.
return segments;
}
@@ -1163,6 +1711,11 @@ GenTree* Promotion::CreateWriteBack(Compiler* compiler, unsigned structLclNum, c
{
GenTree* value = compiler->gtNewLclVarNode(replacement.LclNum);
GenTree* store = compiler->gtNewStoreLclFldNode(structLclNum, replacement.AccessType, replacement.Offset, value);
+
+ if (!compiler->lvaGetDesc(structLclNum)->lvDoNotEnregister)
+ {
+ compiler->lvaSetVarDoNotEnregister(structLclNum DEBUGARG(DoNotEnregisterReason::LocalField));
+ }
return store;
}
@@ -1185,9 +1738,85 @@ GenTree* Promotion::CreateReadBack(Compiler* compiler, unsigned structLclNum, co
{
GenTree* value = compiler->gtNewLclFldNode(structLclNum, replacement.AccessType, replacement.Offset);
GenTree* store = compiler->gtNewStoreLclVarNode(replacement.LclNum, value);
+
+ if (!compiler->lvaGetDesc(structLclNum)->lvDoNotEnregister)
+ {
+ compiler->lvaSetVarDoNotEnregister(structLclNum DEBUGARG(DoNotEnregisterReason::LocalField));
+ }
return store;
}
+//------------------------------------------------------------------------
+// StartBlock:
+// Handle reaching the start of a new basic block by checking internal
+// invariants and, for the scratch block, marking replacements of
+// parameters and OSR locals as requiring read back.
+//
+// Parameters:
+// block - The block
+//
+void ReplaceVisitor::StartBlock(BasicBlock* block)
+{
+ m_currentBlock = block;
+
+#ifdef DEBUG
+ // At the start of every block we expect all replacements to be in their
+ // local home.
+ for (AggregateInfo* agg : m_aggregates)
+ {
+ if (agg == nullptr)
+ {
+ continue;
+ }
+
+ for (Replacement& rep : agg->Replacements)
+ {
+ assert(!rep.NeedsReadBack);
+ assert(rep.NeedsWriteBack);
+ }
+ }
+#endif
+
+ // OSR locals and parameters may need an initial read back, which we mark
+ // when we start the scratch BB.
+ if (!m_compiler->fgBBisScratch(block))
+ {
+ return;
+ }
+
+ for (AggregateInfo* agg : m_aggregates)
+ {
+ if (agg == nullptr)
+ {
+ continue;
+ }
+
+ LclVarDsc* dsc = m_compiler->lvaGetDesc(agg->LclNum);
+ if (!dsc->lvIsParam && !dsc->lvIsOSRLocal)
+ {
+ continue;
+ }
+
+ JITDUMP("Marking fields of %s V%02u as needing read-back in scratch " FMT_BB "\n",
+ dsc->lvIsParam ? "parameter" : "OSR-local", agg->LclNum, block->bbNum);
+
+ for (size_t i = 0; i < agg->Replacements.size(); i++)
+ {
+ Replacement& rep = agg->Replacements[i];
+ rep.NeedsWriteBack = false;
+ if (m_liveness->IsReplacementLiveIn(block, agg->LclNum, (unsigned)i))
+ {
+ rep.NeedsReadBack = true;
+ JITDUMP(" V%02u (%s) marked\n", rep.LclNum, rep.Description);
+ }
+ else
+ {
+ JITDUMP(" V%02u (%s) not marked (not live-in to scratch BB)\n", rep.LclNum, rep.Description);
+ }
+ }
+ }
+}
+
//------------------------------------------------------------------------
// EndBlock:
// Handle reaching the end of the currently started block by preparing
@@ -1229,11 +1858,27 @@ void ReplaceVisitor::EndBlock()
}
else
{
+ // We only mark fields as requiring read-back if they are
+ // live at the point where the stack local was written, so
+ // at first glance we would not expect this case to ever
+ // happen. However, it is possible that the field is live
+ // because it has a future struct use, in which case we may
+ // not need to insert any readbacks anywhere. For example,
+ // consider:
+ //
+ // V03 = CALL() // V03 is a struct with promoted V03.[000..008)
+ // CALL(struct V03) // V03.[000..008) marked as live here
+ //
+ // While V03.[000..008) gets marked for readback at the
+ // assignment, no readback is necessary at the location of
+ // the call argument, and it may die after that.
+
JITDUMP("Skipping reading back dead replacement V%02u.[%03u..%03u) -> V%02u near the end of " FMT_BB
"\n",
agg->LclNum, rep.Offset, rep.Offset + genTypeSize(rep.AccessType), rep.LclNum,
m_currentBlock->bbNum);
}
+
rep.NeedsReadBack = false;
}
@@ -1244,6 +1889,18 @@ void ReplaceVisitor::EndBlock()
m_hasPendingReadBacks = false;
}
+//------------------------------------------------------------------------
+// PostOrderVisit:
+// Visit a node in post-order and make necessary changes for promoted field
+// uses.
+//
+// Parameters:
+// use - The use edge
+// user - The user
+//
+// Returns:
+// Visitor result.
+//
Compiler::fgWalkResult ReplaceVisitor::PostOrderVisit(GenTree** use, GenTree* user)
{
GenTree* tree = *use;
@@ -1338,16 +1995,13 @@ GenTree** ReplaceVisitor::InsertMidTreeReadBacksIfNecessary(GenTree** use)
for (Replacement& rep : agg->Replacements)
{
- // TODO-CQ: We should ensure we do not mark dead fields as
- // requiring readback. Currently it is handled by querying liveness
- // as part of end-of-block readback insertion, but for these
- // mid-tree readbacks we cannot query liveness information for
- // arbitrary locals.
if (!rep.NeedsReadBack)
{
continue;
}
+ JITDUMP(" V%02u.[%03u..%03u) -> V%02u\n", agg->LclNum, rep.Offset, rep.Offset + genTypeSize(rep.AccessType), rep.LclNum);
+
rep.NeedsReadBack = false;
GenTree* readBack = Promotion::CreateReadBack(m_compiler, agg->LclNum, rep);
*use =
@@ -1411,10 +2065,7 @@ void ReplaceVisitor::LoadStoreAroundCall(GenTreeCall* call, GenTree* user)
GenTreeLclVarCommon* retBufLcl = retBufArg->GetNode()->AsLclVarCommon();
unsigned size = m_compiler->typGetObjLayout(call->gtRetClsHnd)->GetSize();
- if (MarkForReadBack(retBufLcl->GetLclNum(), retBufLcl->GetLclOffs(), size))
- {
- JITDUMP("Retbuf has replacements that were marked for read back\n");
- }
+ MarkForReadBack(retBufLcl, size DEBUGARG("used as retbuf"));
}
}
@@ -1548,9 +2199,9 @@ void ReplaceVisitor::ReplaceLocal(GenTree** use, GenTree* user)
Replacement& rep = replacements[index];
assert(accessType == rep.AccessType);
- JITDUMP(" ..replaced with promoted lcl V%02u\n", rep.LclNum);
bool isDef = lcl->OperIsLocalStore();
+
if (isDef)
{
*use = m_compiler->gtNewStoreLclVarNode(rep.LclNum, lcl->Data());
@@ -1573,6 +2224,7 @@ void ReplaceVisitor::ReplaceLocal(GenTree** use, GenTree* user)
}
else if (rep.NeedsReadBack)
{
+ JITDUMP(" ..needs a read back\n");
*use = m_compiler->gtNewOperNode(GT_COMMA, (*use)->TypeGet(),
Promotion::CreateReadBack(m_compiler, lclNum, rep), *use);
rep.NeedsReadBack = false;
@@ -1603,6 +2255,8 @@ void ReplaceVisitor::ReplaceLocal(GenTree** use, GenTree* user)
m_compiler->lvaGetDesc(rep.LclNum)->lvRedefinedInEmbeddedStatement = true;
}
+ JITDUMP(" ..replaced with V%02u\n", rep.LclNum);
+
m_madeChanges = true;
}
@@ -1714,18 +2368,19 @@ void ReplaceVisitor::WriteBackBefore(GenTree** use, unsigned lcl, unsigned offs,
// back before their next use.
//
// Parameters:
-// lcl - The struct local
-// offs - The starting offset of the range in the struct local that needs to be read back from.
-// size - The size of the range
+// lcl - Local node. Its offset is the start of the range.
+// size - The size of the range
+// reason - The reason the readback is required
//
-bool ReplaceVisitor::MarkForReadBack(unsigned lcl, unsigned offs, unsigned size)
+void ReplaceVisitor::MarkForReadBack(GenTreeLclVarCommon* lcl, unsigned size DEBUGARG(const char* reason))
{
- if (m_aggregates[lcl] == nullptr)
+ if (m_aggregates[lcl->GetLclNum()] == nullptr)
{
- return false;
+ return;
}
- jitstd::vector<Replacement>& replacements = m_aggregates[lcl]->Replacements;
+ unsigned offs = lcl->GetLclOffs();
+ jitstd::vector<Replacement>& replacements = m_aggregates[lcl->GetLclNum()]->Replacements;
size_t index = Promotion::BinarySearch(replacements, offs);
if ((ssize_t)index < 0)
@@ -1737,20 +2392,37 @@ bool ReplaceVisitor::MarkForReadBack(unsigned lcl, unsigned offs, unsigned size)
}
}
- bool any = false;
unsigned end = offs + size;
- while ((index < replacements.size()) && (replacements[index].Offset < end))
+ if ((index >= replacements.size()) || (replacements[index].Offset >= end))
+ {
+ // No overlap with any field.
+ return;
+ }
+
+ StructDeaths deaths = m_liveness->GetDeathsForStructLocal(lcl);
+ JITDUMP("Fields of [%06u] in range [%03u..%03u) need to be read back: %s\n", Compiler::dspTreeID(lcl), offs,
+ offs + size, reason);
+
+ do
{
- any = true;
Replacement& rep = replacements[index];
assert(rep.Overlaps(offs, size));
- rep.NeedsReadBack = true;
- rep.NeedsWriteBack = false;
- m_hasPendingReadBacks = true;
- index++;
- }
- return any;
+ if (deaths.IsReplacementDying((unsigned)index))
+ {
+ JITDUMP(" V%02u (%s) not marked (is dying)\n", rep.LclNum, rep.Description);
+ }
+ else
+ {
+ rep.NeedsReadBack = true;
+ m_hasPendingReadBacks = true;
+ JITDUMP(" V%02u (%s) marked\n", rep.LclNum, rep.Description);
+ }
+
+ rep.NeedsWriteBack = false;
+
+ index++;
+ } while ((index < replacements.size()) && (replacements[index].Offset < end));
}
//------------------------------------------------------------------------
@@ -1786,94 +2458,51 @@ PhaseStatus Promotion::Run()
}
}
- unsigned numLocals = m_compiler->lvaCount;
-
-#ifdef DEBUG
- if (m_compiler->verbose)
- {
- for (unsigned lcl = 0; lcl < m_compiler->lvaCount; lcl++)
- {
- LocalUses* uses = localsUse.GetUsesByLocal(lcl);
- if (uses != nullptr)
- {
- uses->Dump(lcl);
- }
- }
- }
-#endif
-
// Pick promotions based on the use information we just collected.
- bool anyReplacements = false;
jitstd::vector<AggregateInfo*> aggregates(m_compiler->lvaCount, nullptr, m_compiler->getAllocator(CMK_Promotion));
- for (unsigned i = 0; i < numLocals; i++)
+ if (!localsUse.PickPromotions(aggregates))
{
- LocalUses* uses = localsUse.GetUsesByLocal(i);
- if (uses == nullptr)
- {
- continue;
- }
-
- uses->PickPromotions(m_compiler, i, &aggregates[i]);
+ // No promotions picked.
+ return PhaseStatus::MODIFIED_NOTHING;
+ }
- if (aggregates[i] == nullptr)
+ // Check for parameters and OSR locals that need to be read back on entry
+ // to the function.
+ for (AggregateInfo* agg : aggregates)
+ {
+ if (agg == nullptr)
{
continue;
}
- jitstd::vector<Replacement>& reps = aggregates[i]->Replacements;
-
- assert(reps.size() > 0);
- anyReplacements = true;
-#ifdef DEBUG
- JITDUMP("V%02u promoted with %d replacements\n", i, (int)reps.size());
- for (const Replacement& rep : reps)
- {
- JITDUMP(" [%03u..%03u) promoted as %s V%02u\n", rep.Offset, rep.Offset + genTypeSize(rep.AccessType),
- varTypeName(rep.AccessType), rep.LclNum);
- }
-#endif
-
- JITDUMP("Computing unpromoted remainder for V%02u\n", i);
- StructSegments unpromotedParts = SignificantSegments(m_compiler, m_compiler->lvaGetDesc(i)->GetLayout());
- for (size_t i = 0; i < reps.size(); i++)
- {
- unpromotedParts.Subtract(
- StructSegments::Segment(reps[i].Offset, reps[i].Offset + genTypeSize(reps[i].AccessType)));
- }
-
- JITDUMP(" Remainder: ");
- DBEXEC(m_compiler->verbose, unpromotedParts.Dump());
- JITDUMP("\n\n");
-
- StructSegments::Segment unpromotedSegment;
- if (unpromotedParts.CoveringSegment(&unpromotedSegment))
- {
- aggregates[i]->UnpromotedMin = unpromotedSegment.Start;
- aggregates[i]->UnpromotedMax = unpromotedSegment.End;
- assert(unpromotedSegment.Start < unpromotedSegment.End);
- }
- else
+ LclVarDsc* dsc = m_compiler->lvaGetDesc(agg->LclNum);
+ if (dsc->lvIsParam || dsc->lvIsOSRLocal)
{
- // Aggregate is fully promoted, leave UnpromotedMin == UnpromotedMax to indicate this.
+ // We will need an initial readback. We create the scratch BB ahead
+ // of time so that we get correct liveness and mark the
+ // parameters/OSR-locals as requiring read-back as part of
+ // ReplaceVisitor::StartBlock when we get to the scratch block.
+ m_compiler->fgEnsureFirstBBisScratch();
+ break;
}
}
- if (!anyReplacements)
- {
- return PhaseStatus::MODIFIED_NOTHING;
- }
-
// Compute liveness for the fields and remainders.
PromotionLiveness liveness(m_compiler, aggregates);
liveness.Run();
JITDUMP("Making replacements\n\n");
+
// Make all replacements we decided on.
ReplaceVisitor replacer(this, aggregates, &liveness);
for (BasicBlock* bb : m_compiler->Blocks())
{
replacer.StartBlock(bb);
+ JITDUMP("\nReplacing in ");
+ DBEXEC(m_compiler->verbose, bb->dspBlockHeader(m_compiler));
+ JITDUMP("\n");
+
for (Statement* stmt : bb->Statements())
{
DISPSTMT(stmt);
@@ -1901,29 +2530,24 @@ PhaseStatus Promotion::Run()
replacer.EndBlock();
}
- // Insert initial IR to read arguments/OSR locals into replacement locals,
- // and add necessary explicit zeroing.
+ // Add necessary explicit zeroing for some locals.
Statement* prevStmt = nullptr;
- for (unsigned lclNum = 0; lclNum < numLocals; lclNum++)
+ for (AggregateInfo* agg : aggregates)
{
- if (aggregates[lclNum] == nullptr)
+ if (agg == nullptr)
{
continue;
}
- LclVarDsc* dsc = m_compiler->lvaGetDesc(lclNum);
- if (dsc->lvIsParam || dsc->lvIsOSRLocal)
- {
- InsertInitialReadBack(lclNum, aggregates[lclNum]->Replacements, &prevStmt);
- }
- else if (dsc->lvSuppressedZeroInit)
+ LclVarDsc* dsc = m_compiler->lvaGetDesc(agg->LclNum);
+ if (dsc->lvSuppressedZeroInit)
{
// We may have suppressed inserting an explicit zero init based on the
// assumption that the entire local will be zero inited in the prolog.
// Now that we are promoting some fields that assumption may be
// invalidated for those fields, and we may need to insert explicit
// zero inits again.
- ExplicitlyZeroInitReplacementLocals(lclNum, aggregates[lclNum]->Replacements, &prevStmt);
+ ExplicitlyZeroInitReplacementLocals(agg->LclNum, agg->Replacements, &prevStmt);
}
}
@@ -1962,27 +2586,6 @@ bool Promotion::IsCandidateForPhysicalPromotion(LclVarDsc* dsc)
return (dsc->TypeGet() == TYP_STRUCT) && !dsc->lvPromoted && !dsc->IsAddressExposed();
}
-//------------------------------------------------------------------------
-// Promotion::InsertInitialReadBack:
-// Insert IR to initially read a struct local's value into its promoted field locals.
-//
-// Parameters:
-// lclNum - The struct local
-// replacements - Replacements for the struct local
-// prevStmt - [in, out] Previous statement to insert after
-//
-void Promotion::InsertInitialReadBack(unsigned lclNum,
- const jitstd::vector<Replacement>& replacements,
- Statement** prevStmt)
-{
- for (unsigned i = 0; i < replacements.size(); i++)
- {
- const Replacement& rep = replacements[i];
- GenTree* readBack = CreateReadBack(m_compiler, lclNum, rep);
- InsertInitStatement(prevStmt, readBack);
- }
-}
-
//------------------------------------------------------------------------
// Promotion::ExplicitlyZeroInitReplacementLocals:
// Insert IR to zero out replacement locals if necessary.
diff --git a/src/coreclr/jit/promotion.h b/src/coreclr/jit/promotion.h
index 2eb279c17a308..416d761fe1bb8 100644
--- a/src/coreclr/jit/promotion.h
+++ b/src/coreclr/jit/promotion.h
@@ -12,7 +12,7 @@ struct Replacement
{
unsigned Offset;
var_types AccessType;
- unsigned LclNum;
+ unsigned LclNum = BAD_VAR_NUM;
// Is the replacement local (given by LclNum) fresher than the value in the struct local?
bool NeedsWriteBack = true;
// Is the value in the struct local fresher than the replacement local?
@@ -21,16 +21,10 @@ struct Replacement
// back before transferring control if necessary.
bool NeedsReadBack = false;
#ifdef DEBUG
- const char* Description;
+ const char* Description = "";
#endif
- Replacement(unsigned offset, var_types accessType, unsigned lclNum DEBUGARG(const char* description))
- : Offset(offset)
- , AccessType(accessType)
- , LclNum(lclNum)
-#ifdef DEBUG
- , Description(description)
-#endif
+ Replacement(unsigned offset, var_types accessType) : Offset(offset), AccessType(accessType)
{
}
@@ -118,7 +112,6 @@ class Promotion
static StructSegments SignificantSegments(Compiler* compiler,
ClassLayout* layout DEBUGARG(FixedBitVect** bitVectRepr = nullptr));
- void InsertInitialReadBack(unsigned lclNum, const jitstd::vector<Replacement>& replacements, Statement** prevStmt);
void ExplicitlyZeroInitReplacementLocals(unsigned lclNum,
const jitstd::vector<Replacement>& replacements,
Statement** prevStmt);
@@ -226,6 +219,7 @@ class PromotionLiveness
}
void Run();
+ bool IsReplacementLiveIn(BasicBlock* bb, unsigned structLcl, unsigned replacement);
bool IsReplacementLiveOut(BasicBlock* bb, unsigned structLcl, unsigned replacement);
StructDeaths GetDeathsForStructLocal(GenTreeLclVarCommon* use);
@@ -280,11 +274,7 @@ class ReplaceVisitor : public GenTreeVisitor
return m_mayHaveForwardSub;
}
- void StartBlock(BasicBlock* block)
- {
- m_currentBlock = block;
- }
-
+ void StartBlock(BasicBlock* block);
void EndBlock();
void StartStatement(Statement* stmt)
@@ -305,7 +295,7 @@ class ReplaceVisitor : public GenTreeVisitor
void CheckForwardSubForLastUse(unsigned lclNum);
void StoreBeforeReturn(GenTreeUnOp* ret);
void WriteBackBefore(GenTree** use, unsigned lcl, unsigned offs, unsigned size);
- bool MarkForReadBack(unsigned lcl, unsigned offs, unsigned size);
+ void MarkForReadBack(GenTreeLclVarCommon* lcl, unsigned size DEBUGARG(const char* reason));
void HandleStore(GenTree** use, GenTree* user);
bool OverlappingReplacements(GenTreeLclVarCommon* lcl,
diff --git a/src/coreclr/jit/promotiondecomposition.cpp b/src/coreclr/jit/promotiondecomposition.cpp
index 9be8ed9c8b095..3bdc88c5a4b3f 100644
--- a/src/coreclr/jit/promotiondecomposition.cpp
+++ b/src/coreclr/jit/promotiondecomposition.cpp
@@ -1202,10 +1202,7 @@ void ReplaceVisitor::HandleStore(GenTree** use, GenTree* user)
{
GenTreeLclVarCommon* lclStore = store->AsLclVarCommon();
unsigned size = lclStore->GetLayout(m_compiler)->GetSize();
- if (MarkForReadBack(lclStore->GetLclNum(), lclStore->GetLclOffs(), size))
- {
- JITDUMP("Marked store destination replacements to be read back (could not decompose this store)\n");
- }
+ MarkForReadBack(lclStore, size DEBUGARG("cannot decompose store"));
}
}
}
diff --git a/src/coreclr/jit/promotionliveness.cpp b/src/coreclr/jit/promotionliveness.cpp
index 71ac5a96b3b8d..43b3722614489 100644
--- a/src/coreclr/jit/promotionliveness.cpp
+++ b/src/coreclr/jit/promotionliveness.cpp
@@ -190,7 +190,7 @@ void PromotionLiveness::ComputeUseDefSets()
// useSet - The use set to mark in.
// defSet - The def set to mark in.
//
-void PromotionLiveness::MarkUseDef(GenTreeLclVarCommon* lcl, BitSetShortLongRep& useSet, BitSetShortLongRep& defSet)
+void PromotionLiveness::MarkUseDef(GenTreeLclVarCommon* lcl, BitVec& useSet, BitVec& defSet)
{
AggregateInfo* agg = m_aggregates[lcl->GetLclNum()];
if (agg == nullptr)
@@ -688,8 +688,19 @@ void PromotionLiveness::FillInLiveness(BitVec& life, BitVec volatileVars, GenTre
if (lcl->OperIs(GT_LCL_ADDR))
{
// Retbuf -- these are definitions but we do not know of how much.
- // We never mark them as dead and we never treat them as killing anything.
- assert(isDef);
+ // We never treat them as killing anything, but we do store liveness information for them.
+ BitVecTraits aggTraits(1 + (unsigned)agg->Replacements.size(), m_compiler);
+ BitVec aggDeaths(BitVecOps::MakeEmpty(&aggTraits));
+ // Copy preexisting liveness information.
+ for (size_t i = 0; i <= agg->Replacements.size(); i++)
+ {
+ unsigned varIndex = baseIndex + (unsigned)i;
+ if (!BitVecOps::IsMember(m_bvTraits, life, varIndex))
+ {
+ BitVecOps::AddElemD(&aggTraits, aggDeaths, (unsigned)i);
+ }
+ }
+ m_aggDeaths.Set(lcl, aggDeaths);
return;
}
@@ -747,6 +758,24 @@ void PromotionLiveness::FillInLiveness(BitVec& life, BitVec volatileVars, GenTre
}
}
+//------------------------------------------------------------------------
+// IsReplacementLiveIn:
+// Check if a replacement field is live at the start of a basic block.
+//
+// Parameters:
+// structLcl - The struct (base) local
+// replacementIndex - Index of the replacement
+//
+// Returns:
+// True if the field is in the live-in set.
+//
+bool PromotionLiveness::IsReplacementLiveIn(BasicBlock* bb, unsigned structLcl, unsigned replacementIndex)
+{
+ BitVec liveIn = m_bbInfo[bb->bbNum].LiveIn;
+ unsigned baseIndex = m_structLclToTrackedIndex[structLcl];
+ return BitVecOps::IsMember(m_bvTraits, liveIn, baseIndex + 1 + replacementIndex);
+}
+
//------------------------------------------------------------------------
// IsReplacementLiveOut:
// Check if a replacement field is live at the end of a basic block.
@@ -778,7 +807,8 @@ bool PromotionLiveness::IsReplacementLiveOut(BasicBlock* bb, unsigned structLcl,
//
StructDeaths PromotionLiveness::GetDeathsForStructLocal(GenTreeLclVarCommon* lcl)
{
- assert(lcl->OperIsLocal() && lcl->TypeIs(TYP_STRUCT) && (m_aggregates[lcl->GetLclNum()] != nullptr));
+ assert((lcl->TypeIs(TYP_STRUCT) || (lcl->OperIs(GT_LCL_ADDR) && ((lcl->gtFlags & GTF_VAR_DEF) != 0))) &&
+ (m_aggregates[lcl->GetLclNum()] != nullptr));
BitVec aggDeaths;
bool found = m_aggDeaths.Lookup(lcl, &aggDeaths);
assert(found);
diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
index c2cdf1613aaf8..edaae4c781177 100644
--- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
+++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.Unix.targets
@@ -112,7 +112,7 @@ The .NET Foundation licenses this file to you under the MIT license.
-
+
diff --git a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
index 4948020c84965..bb2afb85cf22b 100644
--- a/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
+++ b/src/coreclr/nativeaot/BuildIntegration/Microsoft.NETCore.Native.targets
@@ -242,6 +242,7 @@ The .NET Foundation licenses this file to you under the MIT license.
+
diff --git a/src/coreclr/nativeaot/Runtime/amd64/MiscStubs.S b/src/coreclr/nativeaot/Runtime/amd64/MiscStubs.S
index 7faf58c75c45e..c2247d8be5e24 100644
--- a/src/coreclr/nativeaot/Runtime/amd64/MiscStubs.S
+++ b/src/coreclr/nativeaot/Runtime/amd64/MiscStubs.S
@@ -45,6 +45,7 @@ LOCAL_LABEL(ProbeLoop):
ret
NESTED_END RhpStackProbe, _TEXT
+#ifndef TARGET_ANDROID
NESTED_ENTRY RhpGetInlinedThreadStaticBase, _TEXT, NoHandler
// On exit:
// rax - the thread static base for the given type
@@ -61,4 +62,4 @@ NESTED_ENTRY RhpGetInlinedThreadStaticBase, _TEXT, NoHandler
// return it
ret
NESTED_END RhpGetInlinedThreadStaticBase, _TEXT
-
+#endif
diff --git a/src/coreclr/nativeaot/Runtime/arm64/MiscStubs.S b/src/coreclr/nativeaot/Runtime/arm64/MiscStubs.S
index 6c70614f38136..34af83ccf0b98 100644
--- a/src/coreclr/nativeaot/Runtime/arm64/MiscStubs.S
+++ b/src/coreclr/nativeaot/Runtime/arm64/MiscStubs.S
@@ -4,6 +4,7 @@
#include <unixasmmacros.inc>
#include "AsmOffsets.inc"
+#ifndef TARGET_ANDROID
NESTED_ENTRY RhpGetInlinedThreadStaticBase, _TEXT, NoHandler
// On exit:
// x0 - the thread static base for the given type
@@ -22,3 +23,4 @@ HaveValue:
ret
NESTED_END RhpGetInlinedThreadStaticBase, _TEXT
+#endif
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
index 1e3c312188395..39ee2daf5bf8f 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System.Private.CoreLib.csproj
@@ -253,9 +253,6 @@
System\Runtime\InteropServices\Variant.cs
-
- Interop\Windows\Kernel32\Interop.GetCurrentThreadId.cs
-
Interop\Windows\Kernel32\Interop.IsDebuggerPresent.cs
diff --git a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Diagnostics/Debugger.cs b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Diagnostics/Debugger.cs
index 0f6614fd3ca7e..7f0fa52f98825 100644
--- a/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Diagnostics/Debugger.cs
+++ b/src/coreclr/nativeaot/System.Private.CoreLib/src/System/Diagnostics/Debugger.cs
@@ -24,7 +24,8 @@ public static bool IsAttached
{
get
{
- return _isDebuggerAttached;
+ // Managed debugger is never attached because we don't have one
+ return false;
}
}
@@ -38,11 +39,6 @@ public static void NotifyOfCrossThreadDependency()
// nothing to do...yet
}
-#pragma warning disable 649 // Suppress compiler warning about _isDebuggerAttached never being assigned to.
- // _isDebuggerAttached: Do not remove: This field is known to the debugger and modified directly by the debugger.
- private static bool _isDebuggerAttached;
-#pragma warning restore 649
-
///
/// Constants representing the importance level of messages to be logged.
///
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
index f831e74c904a8..5c3eb76470fa3 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl.cs
@@ -1236,22 +1236,6 @@ private void getEHinfo(CORINFO_METHOD_STRUCT_* ftn, uint EHnumber, ref CORINFO_E
return ObjectToHandle(m.OwningType);
}
- private CORINFO_MODULE_STRUCT_* getMethodModule(CORINFO_METHOD_STRUCT_* method)
- {
- MethodDesc m = HandleToObject(method);
- if (m is UnboxingMethodDesc unboxingMethodDesc)
- {
- m = unboxingMethodDesc.Target;
- }
-
- MethodIL methodIL = _compilation.GetMethodIL(m);
- if (methodIL == null)
- {
- return null;
- }
- return ObjectToHandle(methodIL);
- }
-
private bool resolveVirtualMethod(CORINFO_DEVIRTUALIZATION_INFO* info)
{
// Initialize OUT fields
@@ -1565,8 +1549,6 @@ private static CorInfoCallConvExtension ToCorInfoCallConvExtension(UnmanagedCall
private bool satisfiesMethodConstraints(CORINFO_CLASS_STRUCT_* parent, CORINFO_METHOD_STRUCT_* method)
{ throw new NotImplementedException("satisfiesMethodConstraints"); }
- private bool isCompatibleDelegate(CORINFO_CLASS_STRUCT_* objCls, CORINFO_CLASS_STRUCT_* methodParentCls, CORINFO_METHOD_STRUCT_* method, CORINFO_CLASS_STRUCT_* delegateCls, ref bool pfIsOpenDelegate)
- { throw new NotImplementedException("isCompatibleDelegate"); }
private void setPatchpointInfo(PatchpointInfo* patchpointInfo)
{ throw new NotImplementedException("setPatchpointInfo"); }
private PatchpointInfo* getOSRInfo(ref uint ilOffset)
@@ -1813,12 +1795,6 @@ private void resolveToken(ref CORINFO_RESOLVED_TOKEN pResolvedToken)
pResolvedToken.cbMethodSpec = 0;
}
- private bool tryResolveToken(ref CORINFO_RESOLVED_TOKEN pResolvedToken)
- {
- resolveToken(ref pResolvedToken);
- return true;
- }
-
private void findSig(CORINFO_MODULE_STRUCT_* module, uint sigTOK, CORINFO_CONTEXT_STRUCT* context, CORINFO_SIG_INFO* sig)
{
var methodIL = HandleToObject(module);
@@ -1865,11 +1841,6 @@ private static CorInfoCanSkipVerificationResult canSkipVerification(CORINFO_MODU
return CorInfoCanSkipVerificationResult.CORINFO_VERIFICATION_CAN_SKIP;
}
- private bool isValidToken(CORINFO_MODULE_STRUCT_* module, uint metaTOK)
- { throw new NotImplementedException("isValidToken"); }
- private bool isValidStringRef(CORINFO_MODULE_STRUCT_* module, uint metaTOK)
- { throw new NotImplementedException("isValidStringRef"); }
-
private int getStringLiteral(CORINFO_MODULE_STRUCT_* module, uint metaTOK, char* buffer, int size, int startIndex)
{
Debug.Assert(size >= 0);
@@ -2675,58 +2646,6 @@ private TypeCompareState compareTypesForEquality(CORINFO_CLASS_STRUCT_* cls1, CO
};
}
- private CORINFO_CLASS_STRUCT_* mergeClasses(CORINFO_CLASS_STRUCT_* cls1, CORINFO_CLASS_STRUCT_* cls2)
- {
- TypeDesc type1 = HandleToObject(cls1);
- TypeDesc type2 = HandleToObject(cls2);
-
- TypeDesc merged = TypeExtensions.MergeTypesToCommonParent(type1, type2);
-
-#if DEBUG
- // Make sure the merge is reflexive in the cases we "support".
- TypeDesc reflexive = TypeExtensions.MergeTypesToCommonParent(type2, type1);
-
- // If both sides are classes than either they have a common non-interface parent (in which case it is
- // reflexive)
- // OR they share a common interface, and it can be order dependent (if they share multiple interfaces
- // in common)
- if (!type1.IsInterface && !type2.IsInterface)
- {
- if (merged.IsInterface)
- {
- Debug.Assert(reflexive.IsInterface);
- }
- else
- {
- Debug.Assert(merged == reflexive);
- }
- }
- // Both results must either be interfaces or classes. They cannot be mixed.
- Debug.Assert(merged.IsInterface == reflexive.IsInterface);
-
- // If the result of the merge was a class, then the result of the reflexive merge was the same class.
- if (!merged.IsInterface)
- {
- Debug.Assert(merged == reflexive);
- }
-
- // If both sides are arrays, then the result is either an array or g_pArrayClass. The above is
- // actually true for reference types as well, but it is a little excessive to deal with.
- if (type1.IsArray && type2.IsArray)
- {
- TypeDesc arrayClass = _compilation.TypeSystemContext.GetWellKnownType(WellKnownType.Array);
- Debug.Assert((merged.IsArray && reflexive.IsArray)
- || ((merged == arrayClass) && (reflexive == arrayClass)));
- }
-
- // The results must always be assignable
- Debug.Assert(type1.CanCastTo(merged) && type2.CanCastTo(merged) && type1.CanCastTo(reflexive)
- && type2.CanCastTo(reflexive));
-#endif
-
- return ObjectToHandle(merged);
- }
-
private bool isMoreSpecificType(CORINFO_CLASS_STRUCT_* cls1, CORINFO_CLASS_STRUCT_* cls2)
{
TypeDesc type1 = HandleToObject(cls1);
@@ -2802,9 +2721,6 @@ private CorInfoType getChildType(CORINFO_CLASS_STRUCT_* clsHnd, CORINFO_CLASS_ST
return result;
}
- private bool satisfiesClassConstraints(CORINFO_CLASS_STRUCT_* cls)
- { throw new NotImplementedException("satisfiesClassConstraints"); }
-
private bool isSDArray(CORINFO_CLASS_STRUCT_* cls)
{
var td = HandleToObject(cls);
@@ -3228,9 +3144,6 @@ private uint getMethodHash(CORINFO_METHOD_STRUCT_* ftn)
return (uint)HandleToObject(ftn).GetHashCode();
}
- private UIntPtr findNameOfToken(CORINFO_MODULE_STRUCT_* moduleHandle, mdToken token, byte* szFQName, UIntPtr FQNameCapacity)
- { throw new NotImplementedException("findNameOfToken"); }
-
private bool getSystemVAmd64PassStructInRegisterDescriptor(CORINFO_CLASS_STRUCT_* structHnd, SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr)
{
TypeDesc typeDesc = HandleToObject(structHnd);
@@ -3253,8 +3166,6 @@ private uint getRISCV64PassStructInRegisterFlags(CORINFO_CLASS_STRUCT_* cls)
private uint getThreadTLSIndex(ref void* ppIndirection)
{ throw new NotImplementedException("getThreadTLSIndex"); }
- private void* getInlinedCallFrameVptr(ref void* ppIndirection)
- { throw new NotImplementedException("getInlinedCallFrameVptr"); }
private Dictionary<CorInfoHelpFunc, ISymbolNode> _helperCache = new Dictionary<CorInfoHelpFunc, ISymbolNode>();
private void* getHelperFtn(CorInfoHelpFunc ftnNum, ref void* ppIndirection)
@@ -3369,10 +3280,6 @@ private CORINFO_CONST_LOOKUP CreateConstLookupToSymbol(ISymbolNode symbol)
return constLookup;
}
- private bool canAccessFamily(CORINFO_METHOD_STRUCT_* hCaller, CORINFO_CLASS_STRUCT_* hInstanceType)
- { throw new NotImplementedException("canAccessFamily"); }
- private bool isRIDClassDomainID(CORINFO_CLASS_STRUCT_* cls)
- { throw new NotImplementedException("isRIDClassDomainID"); }
private uint getClassDomainID(CORINFO_CLASS_STRUCT_* cls, ref void* ppIndirection)
{ throw new NotImplementedException("getClassDomainID"); }
@@ -3738,11 +3645,8 @@ private static RelocType GetRelocType(TargetArchitecture targetArchitecture, ush
}
}
- private void recordRelocation(void* location, void* locationRW, void* target, ushort fRelocType, ushort slotNum, int addlDelta)
+ private void recordRelocation(void* location, void* locationRW, void* target, ushort fRelocType, int addlDelta)
{
- // slotNum is not used
- Debug.Assert(slotNum == 0);
-
int relocOffset;
BlockType locationBlock = findKnownBlock(location, out relocOffset);
Debug.Assert(locationBlock != BlockType.Unknown, "BlockType.Unknown not expected");
diff --git a/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs b/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs
index 772a97e0cfb82..b31a87daec7c7 100644
--- a/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs
+++ b/src/coreclr/tools/Common/JitInterface/CorInfoImpl_generated.cs
@@ -186,21 +186,6 @@ private static void _getEHinfo(IntPtr thisHandle, IntPtr* ppException, CORINFO_M
}
}
- [UnmanagedCallersOnly]
- private static CORINFO_MODULE_STRUCT_* _getMethodModule(IntPtr thisHandle, IntPtr* ppException, CORINFO_METHOD_STRUCT_* method)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.getMethodModule(method);
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static void _getMethodVTableOffset(IntPtr thisHandle, IntPtr* ppException, CORINFO_METHOD_STRUCT_* method, uint* offsetOfIndirection, uint* offsetAfterIndirection, bool* isRelative)
{
@@ -349,21 +334,6 @@ private static byte _satisfiesMethodConstraints(IntPtr thisHandle, IntPtr* ppExc
}
}
- [UnmanagedCallersOnly]
- private static byte _isCompatibleDelegate(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* objCls, CORINFO_CLASS_STRUCT_* methodParentCls, CORINFO_METHOD_STRUCT_* method, CORINFO_CLASS_STRUCT_* delegateCls, bool* pfIsOpenDelegate)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.isCompatibleDelegate(objCls, methodParentCls, method, delegateCls, ref *pfIsOpenDelegate) ? (byte)1 : (byte)0;
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static void _methodMustBeLoadedBeforeCodeIsRun(IntPtr thisHandle, IntPtr* ppException, CORINFO_METHOD_STRUCT_* method)
{
@@ -450,21 +420,6 @@ private static void _resolveToken(IntPtr thisHandle, IntPtr* ppException, CORINF
}
}
- [UnmanagedCallersOnly]
- private static byte _tryResolveToken(IntPtr thisHandle, IntPtr* ppException, CORINFO_RESOLVED_TOKEN* pResolvedToken)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.tryResolveToken(ref *pResolvedToken) ? (byte)1 : (byte)0;
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static void _findSig(IntPtr thisHandle, IntPtr* ppException, CORINFO_MODULE_STRUCT_* module, uint sigTOK, CORINFO_CONTEXT_STRUCT* context, CORINFO_SIG_INFO* sig)
{
@@ -508,36 +463,6 @@ private static void _findCallSiteSig(IntPtr thisHandle, IntPtr* ppException, COR
}
}
- [UnmanagedCallersOnly]
- private static byte _isValidToken(IntPtr thisHandle, IntPtr* ppException, CORINFO_MODULE_STRUCT_* module, uint metaTOK)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.isValidToken(module, metaTOK) ? (byte)1 : (byte)0;
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
- [UnmanagedCallersOnly]
- private static byte _isValidStringRef(IntPtr thisHandle, IntPtr* ppException, CORINFO_MODULE_STRUCT_* module, uint metaTOK)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.isValidStringRef(module, metaTOK) ? (byte)1 : (byte)0;
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static int _getStringLiteral(IntPtr thisHandle, IntPtr* ppException, CORINFO_MODULE_STRUCT_* module, uint metaTOK, char* buffer, int bufferSize, int startIndex)
{
@@ -1225,21 +1150,6 @@ private static TypeCompareState _compareTypesForEquality(IntPtr thisHandle, IntP
}
}
- [UnmanagedCallersOnly]
- private static CORINFO_CLASS_STRUCT_* _mergeClasses(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* cls1, CORINFO_CLASS_STRUCT_* cls2)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.mergeClasses(cls1, cls2);
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static byte _isMoreSpecificType(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* cls1, CORINFO_CLASS_STRUCT_* cls2)
{
@@ -1300,21 +1210,6 @@ private static CorInfoType _getChildType(IntPtr thisHandle, IntPtr* ppException,
}
}
- [UnmanagedCallersOnly]
- private static byte _satisfiesClassConstraints(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* cls)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.satisfiesClassConstraints(cls) ? (byte)1 : (byte)0;
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static byte _isSDArray(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* cls)
{
@@ -1816,21 +1711,6 @@ private static uint _getMethodHash(IntPtr thisHandle, IntPtr* ppException, CORIN
}
}
- [UnmanagedCallersOnly]
- private static UIntPtr _findNameOfToken(IntPtr thisHandle, IntPtr* ppException, CORINFO_MODULE_STRUCT_* moduleHandle, mdToken token, byte* szFQName, UIntPtr FQNameCapacity)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.findNameOfToken(moduleHandle, token, szFQName, FQNameCapacity);
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static byte _getSystemVAmd64PassStructInRegisterDescriptor(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* structHnd, SYSTEMV_AMD64_CORINFO_STRUCT_REG_PASSING_DESCRIPTOR* structPassInRegDescPtr)
{
@@ -1891,21 +1771,6 @@ private static uint _getThreadTLSIndex(IntPtr thisHandle, IntPtr* ppException, v
}
}
- [UnmanagedCallersOnly]
- private static void* _getInlinedCallFrameVptr(IntPtr thisHandle, IntPtr* ppException, void** ppIndirection)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.getInlinedCallFrameVptr(ref *ppIndirection);
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static int* _getAddrOfCaptureThreadGlobal(IntPtr thisHandle, IntPtr* ppException, void** ppIndirection)
{
@@ -2169,36 +2034,6 @@ private static void _getCallInfo(IntPtr thisHandle, IntPtr* ppException, CORINFO
}
}
- [UnmanagedCallersOnly]
- private static byte _canAccessFamily(IntPtr thisHandle, IntPtr* ppException, CORINFO_METHOD_STRUCT_* hCaller, CORINFO_CLASS_STRUCT_* hInstanceType)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.canAccessFamily(hCaller, hInstanceType) ? (byte)1 : (byte)0;
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
- [UnmanagedCallersOnly]
- private static byte _isRIDClassDomainID(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* cls)
- {
- var _this = GetThis(thisHandle);
- try
- {
- return _this.isRIDClassDomainID(cls) ? (byte)1 : (byte)0;
- }
- catch (Exception ex)
- {
- *ppException = _this.AllocException(ex);
- return default;
- }
- }
-
[UnmanagedCallersOnly]
private static uint _getClassDomainID(IntPtr thisHandle, IntPtr* ppException, CORINFO_CLASS_STRUCT_* cls, void** ppIndirection)
{
@@ -2596,12 +2431,12 @@ private static void _recordCallSite(IntPtr thisHandle, IntPtr* ppException, uint
}
[UnmanagedCallersOnly]
- private static void _recordRelocation(IntPtr thisHandle, IntPtr* ppException, void* location, void* locationRW, void* target, ushort fRelocType, ushort slotNum, int addlDelta)
+ private static void _recordRelocation(IntPtr thisHandle, IntPtr* ppException, void* location, void* locationRW, void* target, ushort fRelocType, int addlDelta)
{
var _this = GetThis(thisHandle);
try
{
- _this.recordRelocation(location, locationRW, target, fRelocType, slotNum, addlDelta);
+ _this.recordRelocation(location, locationRW, target, fRelocType, addlDelta);
}
catch (Exception ex)
{
@@ -2657,7 +2492,7 @@ private static uint _getJitFlags(IntPtr thisHandle, IntPtr* ppException, CORJIT_
private static IntPtr GetUnmanagedCallbacks()
{
- void** callbacks = (void**)Marshal.AllocCoTaskMem(sizeof(IntPtr) * 179);
+ void** callbacks = (void**)Marshal.AllocCoTaskMem(sizeof(IntPtr) * 168);
callbacks[0] = (delegate* unmanaged)&_isIntrinsic;
callbacks[1] = (delegate* unmanaged)&_getMethodAttribs;
@@ -2671,173 +2506,162 @@ private static IntPtr GetUnmanagedCallbacks()
callbacks[9] = (delegate* unmanaged)&_reportTailCallDecision;
callbacks[10] = (delegate* unmanaged)&_getEHinfo;
callbacks[11] = (delegate* unmanaged)&_getMethodClass;
- callbacks[12] = (delegate* unmanaged)&_getMethodModule;
- callbacks[13] = (delegate* unmanaged)&_getMethodVTableOffset;
- callbacks[14] = (delegate* unmanaged)&_resolveVirtualMethod;
- callbacks[15] = (delegate* unmanaged)&_getUnboxedEntry;
- callbacks[16] = (delegate* unmanaged)&_getDefaultComparerClass;
- callbacks[17] = (delegate* unmanaged)&_getDefaultEqualityComparerClass;
- callbacks[18] = (delegate* unmanaged)&_expandRawHandleIntrinsic;
- callbacks[19] = (delegate* unmanaged)&_isIntrinsicType;
- callbacks[20] = (delegate* unmanaged)&_getUnmanagedCallConv;
- callbacks[21] = (delegate* unmanaged)&_pInvokeMarshalingRequired;
- callbacks[22] = (delegate* unmanaged)&_satisfiesMethodConstraints;
- callbacks[23] = (delegate* unmanaged)&_isCompatibleDelegate;
- callbacks[24] = (delegate* unmanaged)&_methodMustBeLoadedBeforeCodeIsRun;
- callbacks[25] = (delegate* unmanaged)&_mapMethodDeclToMethodImpl;
- callbacks[26] = (delegate* unmanaged)&_getGSCookie;
- callbacks[27] = (delegate* unmanaged)&_setPatchpointInfo;
- callbacks[28] = (delegate* unmanaged)&_getOSRInfo;
- callbacks[29] = (delegate* unmanaged)&_resolveToken;
- callbacks[30] = (delegate* unmanaged)&_tryResolveToken;
- callbacks[31] = (delegate* unmanaged)&_findSig;
- callbacks[32] = (delegate* unmanaged)&_findCallSiteSig;
- callbacks[33] = (delegate* unmanaged)&_getTokenTypeAsHandle;
- callbacks[34] = (delegate* unmanaged)&_isValidToken;
- callbacks[35] = (delegate* unmanaged)&_isValidStringRef;
- callbacks[36] = (delegate* unmanaged)&_getStringLiteral;
- callbacks[37] = (delegate* unmanaged)&_printObjectDescription;
- callbacks[38] = (delegate* unmanaged)&_asCorInfoType;
- callbacks[39] = (delegate* unmanaged)&_getClassNameFromMetadata;
- callbacks[40] = (delegate* unmanaged)&_getTypeInstantiationArgument;
- callbacks[41] = (delegate* unmanaged)&_printClassName;
- callbacks[42] = (delegate* unmanaged)&_isValueClass;
- callbacks[43] = (delegate* unmanaged)&_canInlineTypeCheck;
- callbacks[44] = (delegate* unmanaged)&_getClassAttribs;
- callbacks[45] = (delegate* unmanaged)&_getClassModule;
- callbacks[46] = (delegate* unmanaged)&_getModuleAssembly;
- callbacks[47] = (delegate* unmanaged)&_getAssemblyName;
- callbacks[48] = (delegate* unmanaged)&_LongLifetimeMalloc;
- callbacks[49] = (delegate* unmanaged)&_LongLifetimeFree;
- callbacks[50] = (delegate* unmanaged)&_getClassModuleIdForStatics;
- callbacks[51] = (delegate* unmanaged)&_getIsClassInitedFlagAddress;
- callbacks[52] = (delegate* unmanaged)&_getStaticBaseAddress;
- callbacks[53] = (delegate* unmanaged)&_getClassSize;
- callbacks[54] = (delegate* unmanaged)&_getHeapClassSize;
- callbacks[55] = (delegate* unmanaged)&_canAllocateOnStack;
- callbacks[56] = (delegate* unmanaged)&_getClassAlignmentRequirement;
- callbacks[57] = (delegate* unmanaged)&_getClassGClayout;
- callbacks[58] = (delegate* unmanaged)&_getClassNumInstanceFields;
- callbacks[59] = (delegate* unmanaged)&_getFieldInClass;
- callbacks[60] = (delegate* unmanaged)&_checkMethodModifier;
- callbacks[61] = (delegate* unmanaged)&_getNewHelper;
- callbacks[62] = (delegate* unmanaged)&_getNewArrHelper;
- callbacks[63] = (delegate* unmanaged)&_getCastingHelper;
- callbacks[64] = (delegate* unmanaged)&_getSharedCCtorHelper;
- callbacks[65] = (delegate* unmanaged)&_getTypeForBox;
- callbacks[66] = (delegate* unmanaged)&_getBoxHelper;
- callbacks[67] = (delegate* unmanaged)&_getUnBoxHelper;
- callbacks[68] = (delegate* unmanaged)&_getRuntimeTypePointer;
- callbacks[69] = (delegate* unmanaged)&_isObjectImmutable;
- callbacks[70] = (delegate* unmanaged)&_getStringChar;
- callbacks[71] = (delegate* unmanaged)&_getObjectType;
- callbacks[72] = (delegate* unmanaged)&_getReadyToRunHelper;
- callbacks[73] = (delegate* unmanaged)&_getReadyToRunDelegateCtorHelper;
- callbacks[74] = (delegate* unmanaged)&_initClass;
- callbacks[75] = (delegate* unmanaged)&_classMustBeLoadedBeforeCodeIsRun;
- callbacks[76] = (delegate* unmanaged)&_getBuiltinClass;
- callbacks[77] = (delegate* unmanaged)&_getTypeForPrimitiveValueClass;
- callbacks[78] = (delegate* unmanaged)&_getTypeForPrimitiveNumericClass;
- callbacks[79] = (delegate* unmanaged)&_canCast;
- callbacks[80] = (delegate* unmanaged)&_compareTypesForCast;
- callbacks[81] = (delegate* unmanaged)&_compareTypesForEquality;
- callbacks[82] = (delegate* unmanaged)&_mergeClasses;
- callbacks[83] = (delegate* unmanaged)&_isMoreSpecificType;
- callbacks[84] = (delegate* unmanaged)&_isEnum;
- callbacks[85] = (delegate* unmanaged)&_getParentType;
- callbacks[86] = (delegate* unmanaged)&_getChildType;
- callbacks[87] = (delegate* unmanaged)&_satisfiesClassConstraints;
- callbacks[88] = (delegate* unmanaged)&_isSDArray;
- callbacks[89] = (delegate* unmanaged)&_getArrayRank;
- callbacks[90] = (delegate* unmanaged)&_getArrayIntrinsicID;
- callbacks[91] = (delegate* unmanaged)&_getArrayInitializationData;
- callbacks[92] = (delegate* unmanaged)&_canAccessClass;
- callbacks[93] = (delegate* unmanaged)&_printFieldName;
- callbacks[94] = (delegate* unmanaged)&_getFieldClass;
- callbacks[95] = (delegate* unmanaged)&_getFieldType;
- callbacks[96] = (delegate* unmanaged)&_getFieldOffset;
- callbacks[97] = (delegate* unmanaged)&_getFieldInfo;
- callbacks[98] = (delegate* unmanaged)&_getThreadLocalFieldInfo;
- callbacks[99] = (delegate* unmanaged)&_getThreadLocalStaticBlocksInfo;
- callbacks[100] = (delegate* unmanaged)&_isFieldStatic;
- callbacks[101] = (delegate* unmanaged)&_getArrayOrStringLength;
- callbacks[102] = (delegate* unmanaged)&_getBoundaries;
- callbacks[103] = (delegate* unmanaged)&_setBoundaries;
- callbacks[104] = (delegate* unmanaged)&_getVars;
- callbacks[105] = (delegate* unmanaged)&_setVars;
- callbacks[106] = (delegate* unmanaged)&_reportRichMappings;
- callbacks[107] = (delegate* unmanaged)&_allocateArray;
- callbacks[108] = (delegate* unmanaged)&_freeArray;
- callbacks[109] = (delegate* unmanaged)&_getArgNext;
- callbacks[110] = (delegate* unmanaged)&_getArgType;
- callbacks[111] = (delegate* unmanaged)&_getExactClasses;
- callbacks[112] = (delegate* unmanaged)&_getArgClass;
- callbacks[113] = (delegate* unmanaged)&_getHFAType;
- callbacks[114] = (delegate* unmanaged)&_runWithErrorTrap;
- callbacks[115] = (delegate* unmanaged)&_runWithSPMIErrorTrap;
- callbacks[116] = (delegate* unmanaged)&_getEEInfo;
- callbacks[117] = (delegate* unmanaged)&_getJitTimeLogFilename;
- callbacks[118] = (delegate* unmanaged)&_getMethodDefFromMethod;
- callbacks[119] = (delegate* unmanaged)&_printMethodName;
- callbacks[120] = (delegate* unmanaged)&_getMethodNameFromMetadata;
- callbacks[121] = (delegate* unmanaged)&_getMethodHash;
- callbacks[122] = (delegate* unmanaged)&_findNameOfToken;
- callbacks[123] = (delegate* unmanaged)&_getSystemVAmd64PassStructInRegisterDescriptor;
- callbacks[124] = (delegate* unmanaged)&_getLoongArch64PassStructInRegisterFlags;
- callbacks[125] = (delegate* unmanaged)&_getRISCV64PassStructInRegisterFlags;
- callbacks[126] = (delegate* unmanaged)&_getThreadTLSIndex;
- callbacks[127] = (delegate* unmanaged)&_getInlinedCallFrameVptr;
- callbacks[128] = (delegate* unmanaged)&_getAddrOfCaptureThreadGlobal;
- callbacks[129] = (delegate* unmanaged)&_getHelperFtn;
- callbacks[130] = (delegate* unmanaged)&_getFunctionEntryPoint;
- callbacks[131] = (delegate* unmanaged)&_getFunctionFixedEntryPoint;
- callbacks[132] = (delegate* unmanaged)&_getMethodSync;
- callbacks[133] = (delegate* unmanaged)&_getLazyStringLiteralHelper;
- callbacks[134] = (delegate* unmanaged)&_embedModuleHandle;
- callbacks[135] = (delegate* unmanaged)&_embedClassHandle;
- callbacks[136] = (delegate* unmanaged)&_embedMethodHandle;
- callbacks[137] = (delegate* unmanaged)&_embedFieldHandle;
- callbacks[138] = (delegate* unmanaged)&_embedGenericHandle;
- callbacks[139] = (delegate* unmanaged)&_getLocationOfThisType;
- callbacks[140] = (delegate* unmanaged)&_getAddressOfPInvokeTarget;
- callbacks[141] = (delegate* unmanaged)&_GetCookieForPInvokeCalliSig;
- callbacks[142] = (delegate* unmanaged)&_canGetCookieForPInvokeCalliSig;
- callbacks[143] = (delegate* unmanaged)&_getJustMyCodeHandle;
- callbacks[144] = (delegate* unmanaged)&_GetProfilingHandle;
- callbacks[145] = (delegate* unmanaged)&_getCallInfo;
- callbacks[146] = (delegate* unmanaged)&_canAccessFamily;
- callbacks[147] = (delegate* unmanaged)&_isRIDClassDomainID;
- callbacks[148] = (delegate* unmanaged)&_getClassDomainID;
- callbacks[149] = (delegate* unmanaged)&_getStaticFieldContent;
- callbacks[150] = (delegate* unmanaged)&_getObjectContent;
- callbacks[151] = (delegate* unmanaged)&_getStaticFieldCurrentClass;
- callbacks[152] = (delegate* unmanaged)&_getVarArgsHandle;
- callbacks[153] = (delegate* unmanaged)&_canGetVarArgsHandle;
- callbacks[154] = (delegate* unmanaged)&_constructStringLiteral;
- callbacks[155] = (delegate* unmanaged)&_emptyStringLiteral;
- callbacks[156] = (delegate* unmanaged)&_getFieldThreadLocalStoreID;
- callbacks[157] = (delegate* unmanaged)&_GetDelegateCtor;
- callbacks[158] = (delegate* unmanaged)&_MethodCompileComplete;
- callbacks[159] = (delegate* unmanaged)&_getTailCallHelpers;
- callbacks[160] = (delegate* unmanaged)&_convertPInvokeCalliToCall;
- callbacks[161] = (delegate* unmanaged)&_notifyInstructionSetUsage;
- callbacks[162] = (delegate* unmanaged)&_updateEntryPointForTailCall;
- callbacks[163] = (delegate* unmanaged)&_allocMem;
- callbacks[164] = (delegate* unmanaged)&_reserveUnwindInfo;
- callbacks[165] = (delegate* unmanaged)&_allocUnwindInfo;
- callbacks[166] = (delegate* unmanaged)&_allocGCInfo;
- callbacks[167] = (delegate* unmanaged)&_setEHcount;
- callbacks[168] = (delegate* unmanaged)&_setEHinfo;
- callbacks[169] = (delegate* unmanaged)&_logMsg;
- callbacks[170] = (delegate* unmanaged)&_doAssert;
- callbacks[171] = (delegate* unmanaged)&_reportFatalError;
- callbacks[172] = (delegate* unmanaged)&_getPgoInstrumentationResults;
- callbacks[173] = (delegate* unmanaged)&_allocPgoInstrumentationBySchema;
- callbacks[174] = (delegate* unmanaged)&_recordCallSite;
- callbacks[175] = (delegate* unmanaged)&_recordRelocation;
- callbacks[176] = (delegate* unmanaged)&_getRelocTypeHint;
- callbacks[177] = (delegate* unmanaged)&_getExpectedTargetArchitecture;
- callbacks[178] = (delegate* unmanaged)&_getJitFlags;
+ callbacks[12] = (delegate* unmanaged)&_getMethodVTableOffset;
+ callbacks[13] = (delegate* unmanaged)&_resolveVirtualMethod;
+ callbacks[14] = (delegate* unmanaged)&_getUnboxedEntry;
+ callbacks[15] = (delegate* unmanaged)&_getDefaultComparerClass;
+ callbacks[16] = (delegate* unmanaged)&_getDefaultEqualityComparerClass;
+ callbacks[17] = (delegate* unmanaged)&_expandRawHandleIntrinsic;
+ callbacks[18] = (delegate* unmanaged)&_isIntrinsicType;
+ callbacks[19] = (delegate* unmanaged)&_getUnmanagedCallConv;
+ callbacks[20] = (delegate* unmanaged)&_pInvokeMarshalingRequired;
+ callbacks[21] = (delegate* unmanaged)&_satisfiesMethodConstraints;
+ callbacks[22] = (delegate* unmanaged)&_methodMustBeLoadedBeforeCodeIsRun;
+ callbacks[23] = (delegate* unmanaged)&_mapMethodDeclToMethodImpl;
+ callbacks[24] = (delegate* unmanaged)&_getGSCookie;
+ callbacks[25] = (delegate* unmanaged)&_setPatchpointInfo;
+ callbacks[26] = (delegate* unmanaged)&_getOSRInfo;
+ callbacks[27] = (delegate* unmanaged)&_resolveToken;
+ callbacks[28] = (delegate* unmanaged)&_findSig;
+ callbacks[29] = (delegate* unmanaged)&_findCallSiteSig;
+ callbacks[30] = (delegate* unmanaged)&_getTokenTypeAsHandle;
+ callbacks[31] = (delegate* unmanaged)&_getStringLiteral;
+ callbacks[32] = (delegate* unmanaged)&_printObjectDescription;
+ callbacks[33] = (delegate* unmanaged)&_asCorInfoType;
+ callbacks[34] = (delegate* unmanaged)&_getClassNameFromMetadata;
+ callbacks[35] = (delegate* unmanaged)&_getTypeInstantiationArgument;
+ callbacks[36] = (delegate* unmanaged)&_printClassName;
+ callbacks[37] = (delegate* unmanaged)&_isValueClass;
+ callbacks[38] = (delegate* unmanaged)&_canInlineTypeCheck;
+ callbacks[39] = (delegate* unmanaged)&_getClassAttribs;
+ callbacks[40] = (delegate* unmanaged)&_getClassModule;
+ callbacks[41] = (delegate* unmanaged)&_getModuleAssembly;
+ callbacks[42] = (delegate* unmanaged)&_getAssemblyName;
+ callbacks[43] = (delegate* unmanaged)&_LongLifetimeMalloc;
+ callbacks[44] = (delegate* unmanaged)&_LongLifetimeFree;
+ callbacks[45] = (delegate* unmanaged)&_getClassModuleIdForStatics;
+ callbacks[46] = (delegate* unmanaged)&_getIsClassInitedFlagAddress;
+ callbacks[47] = (delegate* unmanaged)&_getStaticBaseAddress;
+ callbacks[48] = (delegate* unmanaged)&_getClassSize;
+ callbacks[49] = (delegate* unmanaged)&_getHeapClassSize;
+ callbacks[50] = (delegate* unmanaged)&_canAllocateOnStack;
+ callbacks[51] = (delegate* unmanaged)&_getClassAlignmentRequirement;
+ callbacks[52] = (delegate* unmanaged)&_getClassGClayout;
+ callbacks[53] = (delegate* unmanaged)&_getClassNumInstanceFields;
+ callbacks[54] = (delegate* unmanaged)&_getFieldInClass;
+ callbacks[55] = (delegate* unmanaged)&_checkMethodModifier;
+ callbacks[56] = (delegate* unmanaged)&_getNewHelper;
+ callbacks[57] = (delegate* unmanaged