Diffstat (limited to 'src')
-rw-r--r--  src/debug/ee/arm64/arm64walker.cpp                                |   8
-rw-r--r--  src/gc/gcinterface.h                                              |   2
-rw-r--r--  src/jit/codegenarm64.cpp                                          |  53
-rw-r--r--  src/jit/compiler.cpp                                              |   1
-rw-r--r--  src/jit/compiler.h                                                |   3
-rw-r--r--  src/jit/flowgraph.cpp                                             |  69
-rw-r--r--  src/jit/importer.cpp                                              |  89
-rw-r--r--  src/jit/inline.def                                                |   7
-rw-r--r--  src/jit/inline.h                                                  |   8
-rw-r--r--  src/jit/inlinepolicy.cpp                                          | 227
-rw-r--r--  src/jit/inlinepolicy.h                                            |  73
-rw-r--r--  src/jit/jitconfigvalues.h                                         |   2
-rw-r--r--  src/jit/lowerarmarch.cpp                                          |   5
-rw-r--r--  src/jit/lsraarm64.cpp                                             |  16
-rw-r--r--  src/jit/morph.cpp                                                 |   4
-rw-r--r--  src/jit/optimizer.cpp                                             |  24
-rw-r--r--  src/jit/simd.cpp                                                  |   8
-rw-r--r--  src/mscorlib/System.Private.CoreLib.csproj                        |   1
-rw-r--r--  src/mscorlib/shared/System.Private.CoreLib.Shared.projitems       |   3
-rw-r--r--  src/mscorlib/shared/System/Collections/Generic/Dictionary.cs (renamed from src/mscorlib/src/System/Collections/Generic/Dictionary.cs) |  79
-rw-r--r--  src/mscorlib/shared/System/Collections/Generic/NonRandomizedStringEqualityComparer.cs |  38
-rw-r--r--  src/mscorlib/shared/System/Collections/HashHelpers.cs             | 108
-rw-r--r--  src/mscorlib/src/System/Collections/Generic/EqualityComparer.cs   |  50
-rw-r--r--  src/mscorlib/src/System/Collections/Hashtable.cs                  | 103
-rw-r--r--  src/publish.proj                                                  |   8
-rw-r--r--  src/vm/siginfo.cpp                                                |   2
-rw-r--r--  src/vm/synch.cpp                                                  |   1
27 files changed, 511 insertions(+), 481 deletions(-)
diff --git a/src/debug/ee/arm64/arm64walker.cpp b/src/debug/ee/arm64/arm64walker.cpp
index 96aff1708f..7a51dc9665 100644
--- a/src/debug/ee/arm64/arm64walker.cpp
+++ b/src/debug/ee/arm64/arm64walker.cpp
@@ -105,10 +105,10 @@ BYTE* NativeWalker::SetupOrSimulateInstructionForPatchSkip(T_CONTEXT * context,
/*
Modify the patchBypass if the opcode is IP-relative, otherwise return it
The following are the instructions that are IP-relative :
- • ADR and ADRP.
- • The Load register (literal) instruction class.
- • Direct branches that use an immediate offset.
- • The unconditional branch with link instructions, BL and BLR, that use the PC to create the return link
+ . ADR and ADRP.
+ . The Load register (literal) instruction class.
+ . Direct branches that use an immediate offset.
+ . The unconditional branch with link instructions, BL and BLR, that use the PC to create the return link
address.
*/
diff --git a/src/gc/gcinterface.h b/src/gc/gcinterface.h
index 4a35d30f88..b1d6b8090c 100644
--- a/src/gc/gcinterface.h
+++ b/src/gc/gcinterface.h
@@ -841,13 +841,11 @@ struct ScanContext
void* _unused1;
#endif //CHECK_APP_DOMAIN_LEAKS || FEATURE_APPDOMAIN_RESOURCE_MONITORING || DACCESS_COMPILE
-#ifndef FEATURE_REDHAWK
#if defined(GC_PROFILING) || defined (DACCESS_COMPILE)
MethodDesc *pMD;
#else
void* _unused2;
#endif //GC_PROFILING || DACCESS_COMPILE
-#endif // FEATURE_REDHAWK
#if defined(GC_PROFILING) || defined(FEATURE_EVENT_TRACE)
EtwGCRootKind dwEtwRootKind;
#else
diff --git a/src/jit/codegenarm64.cpp b/src/jit/codegenarm64.cpp
index 89fe0c92e3..74ef06d588 100644
--- a/src/jit/codegenarm64.cpp
+++ b/src/jit/codegenarm64.cpp
@@ -4365,6 +4365,14 @@ void CodeGen::genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode)
getEmitter()->emitIns_R_R_R(ins, attr, tmpFloatReg, op1Reg, op2Reg, opt);
+ if ((simdNode->gtFlags & GTF_SIMD12_OP) != 0)
+ {
+ // For 12-byte vectors we must set the upper bits to get a correct comparison;
+ // we do not assume the upper bits are zero.
+ instGen_Set_Reg_To_Imm(EA_4BYTE, targetReg, -1);
+ getEmitter()->emitIns_R_R_I(INS_ins, EA_4BYTE, tmpFloatReg, targetReg, 3);
+ }
+
getEmitter()->emitIns_R_R(INS_uminv, attr, tmpFloatReg, tmpFloatReg,
(simdNode->gtSIMDSize > 8) ? INS_OPTS_16B : INS_OPTS_8B);
@@ -4423,6 +4431,13 @@ void CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
// Vector multiply
getEmitter()->emitIns_R_R_R(ins, attr, tmpReg, op1Reg, op2Reg, opt);
+ if ((simdNode->gtFlags & GTF_SIMD12_OP) != 0)
+ {
+ // For 12-byte vectors we must zero the upper bits to get a correct dot product;
+ // we do not assume the upper bits are zero.
+ getEmitter()->emitIns_R_R_I(INS_ins, EA_4BYTE, tmpReg, REG_ZR, 3);
+ }
+
// Vector add horizontal
if (varTypeIsFloating(baseType))
{
@@ -4432,11 +4447,11 @@ void CodeGen::genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode)
{
getEmitter()->emitIns_R_R_R(INS_faddp, attr, tmpReg, tmpReg, tmpReg, INS_OPTS_4S);
}
- getEmitter()->emitIns_R_R(INS_faddp, EA_8BYTE, targetReg, tmpReg, INS_OPTS_2S);
+ getEmitter()->emitIns_R_R(INS_faddp, EA_4BYTE, targetReg, tmpReg);
}
else
{
- getEmitter()->emitIns_R_R(INS_faddp, EA_16BYTE, targetReg, tmpReg, INS_OPTS_2D);
+ getEmitter()->emitIns_R_R(INS_faddp, EA_8BYTE, targetReg, tmpReg);
}
}
else
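
Both fixups in this file guard the same hazard: a 12-byte vector (System.Numerics.Vector3) lives in a 16-byte SIMD register, so the unused fourth lane may hold garbage that uminv or faddp would otherwise fold into the result. A minimal C# sketch of code exercising both paths (illustrative only, not part of this commit):

    using System;
    using System.Numerics;

    class Vector3UpperLane
    {
        static void Main()
        {
            var a = new Vector3(1f, 2f, 3f);
            var b = new Vector3(1f, 2f, 3f);

            // Equality reduces across all lanes; a stale fourth lane would
            // flip the answer, hence the "set upper bits to -1" fixup.
            Console.WriteLine(a == b);            // True

            // Dot product sums lanes; a stale fourth lane would leak into
            // the sum, hence the "zero the upper element" fixup.
            Console.WriteLine(Vector3.Dot(a, b)); // 14
        }
    }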
@@ -4499,19 +4514,43 @@ void CodeGen::genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode)
// Optimize the case of op1 is in memory and trying to access ith element.
assert(op1->isUsedFromReg());
+ emitAttr baseTypeSize = emitTypeSize(baseType);
+
if (op2->IsCnsIntOrI())
{
assert(op2->isContained());
- emitAttr attr = emitTypeSize(baseType);
- unsigned int index = (unsigned int)op2->gtIntCon.gtIconVal;
+ ssize_t index = op2->gtIntCon.gtIconVal;
- getEmitter()->emitIns_R_R_I(INS_mov, attr, targetReg, srcReg, index);
+ if (getEmitter()->isValidVectorIndex(emitTypeSize(simdType), baseTypeSize, index))
+ {
+ // Only generate code for the get if the index is valid;
+ // otherwise the generated code will throw
+ getEmitter()->emitIns_R_R_I(INS_mov, baseTypeSize, targetReg, srcReg, index);
+ }
}
else
{
- NYI("getItem() with non const index");
- assert(op2->IsCnsIntOrI());
+ unsigned simdInitTempVarNum = compiler->lvaSIMDInitTempVarNum;
+ noway_assert(compiler->lvaSIMDInitTempVarNum != BAD_VAR_NUM);
+
+ regNumber indexReg = op2->gtRegNum;
+ regNumber tmpReg = simdNode->ExtractTempReg();
+
+ assert(genIsValidIntReg(tmpReg));
+ assert(tmpReg != indexReg);
+
+ unsigned baseTypeScale = genLog2(EA_SIZE_IN_BYTES(baseTypeSize));
+
+ // Load the address of simdInitTempVarNum
+ getEmitter()->emitIns_R_S(INS_lea, EA_PTRSIZE, tmpReg, simdInitTempVarNum, 0);
+
+ // Store the vector to simdInitTempVarNum
+ getEmitter()->emitIns_R_R(INS_str, emitTypeSize(simdType), srcReg, tmpReg);
+
+ // Load item at simdInitTempVarNum[index]
+ getEmitter()->emitIns_R_R_R_Ext(ins_Load(baseType), baseTypeSize, targetReg, tmpReg, indexReg, INS_OPTS_LSL,
+ baseTypeScale);
}
genProduceReg(simdNode);
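
Previously the non-constant-index path was an NYI; the new sequence spills the vector to the SIMD temp slot and loads element[index] with a scaled register offset. A C# sketch that takes this path, since the lane index is not a JIT-time constant (illustrative only):

    using System;
    using System.Numerics;

    class VariableIndexGetItem
    {
        // The index arrives as a parameter, so SIMDIntrinsicGetItem cannot
        // contain it as an immediate and uses the stack-spill sequence above.
        static float GetLane(Vector<float> v, int i) => v[i];

        static void Main()
        {
            var data = new float[] { 1f, 2f, 3f, 4f, 5f, 6f, 7f, 8f };
            var v = new Vector<float>(data);
            for (int i = 0; i < Vector<float>.Count; i++)
                Console.WriteLine(GetLane(v, i));
        }
    }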
diff --git a/src/jit/compiler.cpp b/src/jit/compiler.cpp
index 515912c673..efad4cfd0f 100644
--- a/src/jit/compiler.cpp
+++ b/src/jit/compiler.cpp
@@ -1997,6 +1997,7 @@ void Compiler::compInit(ArenaAllocator* pAlloc, InlineInfo* inlineInfo)
compLongUsed = false;
compTailCallUsed = false;
compLocallocUsed = false;
+ compLocallocOptimized = false;
compQmarkRationalized = false;
compQmarkUsed = false;
compFloatingPointUsed = false;
diff --git a/src/jit/compiler.h b/src/jit/compiler.h
index 87d3f3965f..ffaa8cec9d 100644
--- a/src/jit/compiler.h
+++ b/src/jit/compiler.h
@@ -7917,6 +7917,7 @@ public:
bool compFloatingPointUsed; // Does the method use TYP_FLOAT or TYP_DOUBLE
bool compTailCallUsed; // Does the method do a tailcall
bool compLocallocUsed; // Does the method use localloc.
+ bool compLocallocOptimized; // Does the method have an optimized localloc
bool compQmarkUsed; // Does the method use GT_QMARK/GT_COLON
bool compQmarkRationalized; // Is it allowed to use a GT_QMARK/GT_COLON node.
bool compUnsafeCastUsed; // Does the method use LDIND/STIND to cast between scalar/reference types
@@ -9295,6 +9296,8 @@ public:
#define DEFAULT_MAX_INLINE_DEPTH 20 // Methods at more than this level deep will not be inlined
+#define DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE 32 // fixed locallocs of this size or smaller will convert to local buffers
+
private:
#ifdef FEATURE_JIT_METHOD_PERF
JitTimer* pCompJitTimer; // Timer data structure (by phases) for current compilation.
diff --git a/src/jit/flowgraph.cpp b/src/jit/flowgraph.cpp
index b7d50d105f..15f0d5d53c 100644
--- a/src/jit/flowgraph.cpp
+++ b/src/jit/flowgraph.cpp
@@ -4333,6 +4333,18 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE*
compInlineResult->NoteBool(InlineObservation::CALLEE_IS_FORCE_INLINE, isForceInline);
compInlineResult->NoteInt(InlineObservation::CALLEE_IL_CODE_SIZE, codeSize);
+ // Determine if call site is within a try.
+ if (isInlining && impInlineInfo->iciBlock->hasTryIndex())
+ {
+ compInlineResult->Note(InlineObservation::CALLSITE_IN_TRY_REGION);
+ }
+
+ // Determine if the call site is in a loop.
+ if (isInlining && ((impInlineInfo->iciBlock->bbFlags & BBF_BACKWARD_JUMP) != 0))
+ {
+ compInlineResult->Note(InlineObservation::CALLSITE_IN_LOOP);
+ }
+
#ifdef DEBUG
// If inlining, this method should still be a candidate.
@@ -4807,8 +4819,6 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE*
// the list of other opcodes (for all platforms).
__fallthrough;
-
- case CEE_LOCALLOC:
case CEE_MKREFANY:
case CEE_RETHROW:
if (makeInlineObservations)
@@ -4826,6 +4836,19 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE*
}
break;
+ case CEE_LOCALLOC:
+
+ // We now allow localloc callees to become candidates in some cases.
+ if (makeInlineObservations)
+ {
+ compInlineResult->Note(InlineObservation::CALLEE_HAS_LOCALLOC);
+ if (isInlining && compInlineResult->IsFailure())
+ {
+ return;
+ }
+ }
+ break;
+
case CEE_LDARG_0:
case CEE_LDARG_1:
case CEE_LDARG_2:
@@ -4900,25 +4923,16 @@ void Compiler::fgFindJumpTargets(const BYTE* codeAddr, IL_OFFSET codeSize, BYTE*
{
compInlineResult->Note(InlineObservation::CALLEE_END_OPCODE_SCAN);
- if (!compInlineResult->UsesLegacyPolicy())
- {
- // If there are no return blocks we know it does not return, however if there
- // return blocks we don't know it returns as it may be counting unreachable code.
- // However we will still make the CALLEE_DOES_NOT_RETURN observation.
+ // If there are no return blocks we know it does not return; however, if there
+ // are return blocks we can't be sure it returns, as the count may include unreachable code.
+ // Either way we will still make the CALLEE_DOES_NOT_RETURN observation.
- compInlineResult->NoteBool(InlineObservation::CALLEE_DOES_NOT_RETURN, retBlocks == 0);
-
- if (retBlocks == 0 && isInlining)
- {
- // Mark the call node as "no return" as it can impact caller's code quality.
- impInlineInfo->iciCall->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
- }
- }
+ compInlineResult->NoteBool(InlineObservation::CALLEE_DOES_NOT_RETURN, retBlocks == 0);
- // Determine if call site is within a try.
- if (isInlining && impInlineInfo->iciBlock->hasTryIndex())
+ if (retBlocks == 0 && isInlining)
{
- compInlineResult->Note(InlineObservation::CALLSITE_IN_TRY_REGION);
+ // Mark the call node as "no return" as it can impact caller's code quality.
+ impInlineInfo->iciCall->gtCallMoreFlags |= GTF_CALL_M_DOES_NOT_RETURN;
}
// If the inline is viable and discretionary, do the
@@ -5040,14 +5054,6 @@ void Compiler::fgObserveInlineConstants(OPCODE opcode, const FgStack& stack, boo
// The stack only has to be 1 deep for BRTRUE/FALSE
bool lookForBranchCases = stack.IsStackAtLeastOneDeep();
- if (compInlineResult->UsesLegacyPolicy())
- {
- // LegacyPolicy misses cases where the stack is really one
- // deep but the model says it's two deep. We need to do
- // likewise to preseve old behavior.
- lookForBranchCases &= !stack.IsStackTwoDeep();
- }
-
if (lookForBranchCases)
{
if (opcode == CEE_BRFALSE || opcode == CEE_BRFALSE_S || opcode == CEE_BRTRUE || opcode == CEE_BRTRUE_S)
@@ -5896,14 +5902,8 @@ void Compiler::fgFindBasicBlocks()
#ifdef DEBUG
// If fgFindJumpTargets marked the call as "no return" there
// really should be no BBJ_RETURN blocks in the method.
- //
- // Note LegacyPolicy does not mark calls as no return, so if
- // it's active, skip the check.
- if (!compInlineResult->UsesLegacyPolicy())
- {
- bool markedNoReturn = (impInlineInfo->iciCall->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
- assert((markedNoReturn && (retBlocks == 0)) || (!markedNoReturn && (retBlocks >= 1)));
- }
+ bool markedNoReturn = (impInlineInfo->iciCall->gtCallMoreFlags & GTF_CALL_M_DOES_NOT_RETURN) != 0;
+ assert((markedNoReturn && (retBlocks == 0)) || (!markedNoReturn && (retBlocks >= 1)));
#endif // DEBUG
if (compInlineResult->IsFailure())
@@ -22959,6 +22959,7 @@ _Done:
compLongUsed |= InlineeCompiler->compLongUsed;
compFloatingPointUsed |= InlineeCompiler->compFloatingPointUsed;
compLocallocUsed |= InlineeCompiler->compLocallocUsed;
+ compLocallocOptimized |= InlineeCompiler->compLocallocOptimized;
compQmarkUsed |= InlineeCompiler->compQmarkUsed;
compUnsafeCastUsed |= InlineeCompiler->compUnsafeCastUsed;
compNeedsGSSecurityCookie |= InlineeCompiler->compNeedsGSSecurityCookie;
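
The two new observations describe the shape of the call site rather than the callee. A C# sketch of call sites that would trigger each (illustrative only):

    using System;

    class CallsiteObservations
    {
        static int Square(int x) => x * x;

        static int Sum(int n)
        {
            int total = 0;
            for (int i = 0; i < n; i++)
                total += Square(i); // block has BBF_BACKWARD_JUMP: CALLSITE_IN_LOOP
            return total;
        }

        static int Guarded(int x)
        {
            try
            {
                return Square(x);   // block has a try index: CALLSITE_IN_TRY_REGION
            }
            catch (Exception)
            {
                return 0;
            }
        }

        static void Main() => Console.WriteLine(Sum(10) + Guarded(3));
    }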
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index bb02ca809f..b5df87b0ec 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -14279,8 +14279,6 @@ void Compiler::impImportBlockCode(BasicBlock* block)
break;
case CEE_LOCALLOC:
- assert(!compIsForInlining());
-
if (tiVerificationNeeded)
{
Verify(false, "bad opcode");
@@ -14292,11 +14290,6 @@ void Compiler::impImportBlockCode(BasicBlock* block)
BADCODE("Localloc can't be inside handler");
}
- /* The FP register may not be back to the original value at the end
- of the method, even if the frame size is 0, as localloc may
- have modified it. So we will HAVE to reset it */
-
- compLocallocUsed = true;
setNeedsGSSecurityCookie();
// Get the size to allocate
@@ -14309,11 +14302,81 @@ void Compiler::impImportBlockCode(BasicBlock* block)
BADCODE("Localloc can only be used when the stack is empty");
}
- op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
+ // If the localloc is not in a loop and its size is a small constant,
+ // create a new local var of TYP_BLK and return its address.
+ {
+ bool convertedToLocal = false;
+
+ // Need to aggressively fold here, as even fixed-size locallocs
+ // will have casts in the way.
+ op2 = gtFoldExpr(op2);
+
+ if (op2->IsIntegralConst())
+ {
+ const ssize_t allocSize = op2->AsIntCon()->IconValue();
+
+ if (allocSize == 0)
+ {
+ // Result is nullptr
+ JITDUMP("Converting stackalloc of 0 bytes to push null unmanaged pointer\n");
+ op1 = gtNewIconNode(0, TYP_I_IMPL);
+ convertedToLocal = true;
+ }
+ else if ((allocSize > 0) && ((compCurBB->bbFlags & BBF_BACKWARD_JUMP) == 0))
+ {
+ // Get the size threshold for local conversion
+ ssize_t maxSize = DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE;
+
+#ifdef DEBUG
+ // Optionally allow this to be modified
+ maxSize = JitConfig.JitStackAllocToLocalSize();
+#endif // DEBUG
+
+ if (allocSize <= maxSize)
+ {
+ const unsigned stackallocAsLocal = lvaGrabTemp(false DEBUGARG("stackallocLocal"));
+ JITDUMP("Converting stackalloc of %lld bytes to new local V%02u\n", allocSize,
+ stackallocAsLocal);
+ lvaTable[stackallocAsLocal].lvType = TYP_BLK;
+ lvaTable[stackallocAsLocal].lvExactSize = (unsigned)allocSize;
+ lvaTable[stackallocAsLocal].lvIsUnsafeBuffer = true;
+ op1 = gtNewLclvNode(stackallocAsLocal, TYP_BLK);
+ op1 = gtNewOperNode(GT_ADDR, TYP_I_IMPL, op1);
+ convertedToLocal = true;
+ compGSReorderStackLayout = true;
+ }
+ }
+ }
+
+ if (!convertedToLocal)
+ {
+ // Bail out if inlining and the localloc was not converted.
+ //
+ // Note we might consider allowing the inline, if the call
+ // site is not in a loop.
+ if (compIsForInlining())
+ {
+ InlineObservation obs = op2->IsIntegralConst()
+ ? InlineObservation::CALLEE_LOCALLOC_TOO_LARGE
+ : InlineObservation::CALLSITE_LOCALLOC_SIZE_UNKNOWN;
+ compInlineResult->NoteFatal(obs);
+ return;
+ }
- // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
+ op1 = gtNewOperNode(GT_LCLHEAP, TYP_I_IMPL, op2);
+ // May throw a stack overflow exception. Obviously, we don't want locallocs to be CSE'd.
+ op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
- op1->gtFlags |= (GTF_EXCEPT | GTF_DONT_CSE);
+ /* The FP register may not be back to the original value at the end
+ of the method, even if the frame size is 0, as localloc may
+ have modified it. So we will HAVE to reset it */
+ compLocallocUsed = true;
+ }
+ else
+ {
+ compLocallocOptimized = true;
+ }
+ }
impPushOnStack(op1, tiRetVal);
break;
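
The net effect of the importer change, sketched in C# compiled with /unsafe (illustrative only, not from this commit): a small, fixed-size stackalloc outside a loop becomes an ordinary local buffer so the method stays inlinable, a constant zero-length stackalloc becomes a null pointer, and anything else still produces GT_LCLHEAP and blocks inlining.

    using System;

    class StackAllocToLocal
    {
        static unsafe int SmallFixed()
        {
            // 16 bytes is under the 32-byte threshold and not under a
            // backward jump, so this becomes a TYP_BLK local: no localloc,
            // and the method remains an inline candidate.
            byte* buf = stackalloc byte[16];
            buf[0] = 42;
            return buf[0];
        }

        static unsafe bool ZeroLength()
        {
            // A constant zero size folds to a null unmanaged pointer
            // (per the conversion above).
            byte* buf = stackalloc byte[0];
            return buf == null;
        }

        static unsafe int Dynamic(int n)
        {
            // Size unknown at JIT time: a true GT_LCLHEAP. As an inline
            // candidate this fails with CALLSITE_LOCALLOC_SIZE_UNKNOWN.
            byte* buf = stackalloc byte[n];
            buf[0] = 1;
            return buf[0];
        }

        static void Main() => Console.WriteLine($"{SmallFixed()} {ZeroLength()} {Dynamic(8)}");
    }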
@@ -14811,7 +14874,7 @@ void Compiler::impImportBlockCode(BasicBlock* block)
impResolveToken(codeAddr + (sz + 1), &unboxResolvedToken, CORINFO_TOKENKIND_Class);
- // See if token types a equal.
+ // See if the resolved tokens describe types that are equal.
const TypeCompareState compare =
info.compCompHnd->compareTypesForEquality(unboxResolvedToken.hClass, resolvedToken.hClass);
@@ -14823,10 +14886,6 @@ void Compiler::impImportBlockCode(BasicBlock* block)
sz += sizeof(mdToken) + 1;
break;
}
- else
- {
- assert(unboxResolvedToken.hClass != resolvedToken.hClass);
- }
}
impImportAndPushBox(&resolvedToken);
diff --git a/src/jit/inline.def b/src/jit/inline.def
index ff0b21100e..0a13950b48 100644
--- a/src/jit/inline.def
+++ b/src/jit/inline.def
@@ -48,6 +48,7 @@ INLINE_OBSERVATION(IS_SYNCHRONIZED, bool, "is synchronized",
INLINE_OBSERVATION(IS_VM_NOINLINE, bool, "noinline per VM", FATAL, CALLEE)
INLINE_OBSERVATION(LACKS_RETURN, bool, "no return opcode", FATAL, CALLEE)
INLINE_OBSERVATION(LDFLD_NEEDS_HELPER, bool, "ldfld needs helper", FATAL, CALLEE)
+INLINE_OBSERVATION(LOCALLOC_TOO_LARGE, bool, "localloc size too large", FATAL, CALLEE)
INLINE_OBSERVATION(LOG_REPLAY_REJECT, bool, "rejected by log replay", FATAL, CALLEE)
INLINE_OBSERVATION(MARKED_AS_SKIPPED, bool, "skipped by complus request", FATAL, CALLEE)
INLINE_OBSERVATION(MAXSTACK_TOO_BIG, bool, "maxstack too big" , FATAL, CALLEE)
@@ -78,6 +79,7 @@ INLINE_OBSERVATION(CLASS_PROMOTABLE, bool, "promotable value class",
INLINE_OBSERVATION(DOES_NOT_RETURN, bool, "does not return", INFORMATION, CALLEE)
INLINE_OBSERVATION(END_OPCODE_SCAN, bool, "done looking at opcodes", INFORMATION, CALLEE)
INLINE_OBSERVATION(HAS_GC_STRUCT, bool, "has gc field in struct local", INFORMATION, CALLEE)
+INLINE_OBSERVATION(HAS_LOCALLOC, bool, "has localloc", INFORMATION, CALLEE)
INLINE_OBSERVATION(HAS_PINNED_LOCALS, bool, "has pinned locals", INFORMATION, CALLEE)
INLINE_OBSERVATION(HAS_SIMD, bool, "has SIMD arg, local, or ret", INFORMATION, CALLEE)
INLINE_OBSERVATION(HAS_SWITCH, bool, "has switch", INFORMATION, CALLEE)
@@ -143,6 +145,8 @@ INLINE_OBSERVATION(IS_WITHIN_FILTER, bool, "within filter region",
INLINE_OBSERVATION(LDARGA_NOT_LOCAL_VAR, bool, "ldarga not on local var", FATAL, CALLSITE)
INLINE_OBSERVATION(LDFLD_NEEDS_HELPER, bool, "ldfld needs helper", FATAL, CALLSITE)
INLINE_OBSERVATION(LDVIRTFN_ON_NON_VIRTUAL, bool, "ldvirtfn on non-virtual", FATAL, CALLSITE)
+INLINE_OBSERVATION(LOCALLOC_IN_LOOP, bool, "within loop, has localloc", FATAL, CALLSITE)
+INLINE_OBSERVATION(LOCALLOC_SIZE_UNKNOWN, bool, "localloc size unknown", FATAL, CALLSITE)
INLINE_OBSERVATION(LOG_REPLAY_REJECT, bool, "rejected by log replay", FATAL, CALLSITE)
INLINE_OBSERVATION(NOT_CANDIDATE, bool, "not inline candidate", FATAL, CALLSITE)
INLINE_OBSERVATION(NOT_PROFITABLE_INLINE, bool, "unprofitable inline", FATAL, CALLSITE)
@@ -164,7 +168,8 @@ INLINE_OBSERVATION(RARE_GC_STRUCT, bool, "rarely called, has gc str
INLINE_OBSERVATION(CONSTANT_ARG_FEEDS_TEST, bool, "constant argument feeds test", INFORMATION, CALLSITE)
INLINE_OBSERVATION(DEPTH, int, "depth", INFORMATION, CALLSITE)
INLINE_OBSERVATION(FREQUENCY, int, "rough call site frequency", INFORMATION, CALLSITE)
-INLINE_OBSERVATION(IN_TRY_REGION, bool, "call site in try region", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(IN_LOOP, bool, "call site is in a loop", INFORMATION, CALLSITE)
+INLINE_OBSERVATION(IN_TRY_REGION, bool, "call site is in a try region", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IS_PROFITABLE_INLINE, bool, "profitable inline", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IS_SAME_THIS, bool, "same this as root caller", INFORMATION, CALLSITE)
INLINE_OBSERVATION(IS_SIZE_DECREASING_INLINE, bool, "size decreasing inline", INFORMATION, CALLSITE)
diff --git a/src/jit/inline.h b/src/jit/inline.h
index ee07130676..f06b4f7a6f 100644
--- a/src/jit/inline.h
+++ b/src/jit/inline.h
@@ -248,7 +248,6 @@ public:
// Policy policies
virtual bool PropagateNeverToRuntime() const = 0;
- virtual bool IsLegacyPolicy() const = 0;
// Policy estimates
virtual int CodeSizeEstimate() = 0;
@@ -454,13 +453,6 @@ public:
return m_Policy;
}
- // True if the policy used for this result is (exactly) the legacy
- // policy.
- bool UsesLegacyPolicy() const
- {
- return m_Policy->IsLegacyPolicy();
- }
-
// SetReported indicates that this particular result doesn't need
// to be reported back to the runtime, either because the runtime
// already knows, or we aren't actually inlining yet.
diff --git a/src/jit/inlinepolicy.cpp b/src/jit/inlinepolicy.cpp
index 61e70c3ed4..5847bbcb54 100644
--- a/src/jit/inlinepolicy.cpp
+++ b/src/jit/inlinepolicy.cpp
@@ -85,16 +85,8 @@ InlinePolicy* InlinePolicy::GetPolicy(Compiler* compiler, bool isPrejitRoot)
return new (compiler, CMK_Inlining) ModelPolicy(compiler, isPrejitRoot);
}
- // Optionally fallback to the original legacy policy
- bool useLegacyPolicy = JitConfig.JitInlinePolicyLegacy() != 0;
-
- if (useLegacyPolicy)
- {
- return new (compiler, CMK_Inlining) LegacyPolicy(compiler, isPrejitRoot);
- }
-
- // Use the enhanced legacy policy by default
- return new (compiler, CMK_Inlining) EnhancedLegacyPolicy(compiler, isPrejitRoot);
+ // Otherwise, use the DefaultPolicy
+ return new (compiler, CMK_Inlining) DefaultPolicy(compiler, isPrejitRoot);
}
//------------------------------------------------------------------------
@@ -225,7 +217,7 @@ void LegalPolicy::SetCandidate(InlineObservation obs)
//------------------------------------------------------------------------
// NoteSuccess: handle finishing all the inlining checks successfully
-void LegacyPolicy::NoteSuccess()
+void DefaultPolicy::NoteSuccess()
{
assert(InlDecisionIsCandidate(m_Decision));
m_Decision = InlineDecision::SUCCESS;
@@ -237,7 +229,7 @@ void LegacyPolicy::NoteSuccess()
// Arguments:
// obs - the current observation
// value - the value of the observation
-void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
+void DefaultPolicy::NoteBool(InlineObservation obs, bool value)
{
// Check the impact
InlineImpact impact = InlGetImpact(obs);
@@ -275,7 +267,7 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
break;
case InlineObservation::CALLEE_LOOKS_LIKE_WRAPPER:
- // LegacyPolicy ignores this for prejit roots.
+ // DefaultPolicy ignores this for prejit roots.
if (!m_IsPrejitRoot)
{
m_LooksLikeWrapperMethod = value;
@@ -283,7 +275,7 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
break;
case InlineObservation::CALLEE_ARG_FEEDS_CONSTANT_TEST:
- // LegacyPolicy ignores this for prejit roots.
+ // DefaultPolicy ignores this for prejit roots.
if (!m_IsPrejitRoot)
{
m_ArgFeedsConstantTest++;
@@ -291,7 +283,7 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
break;
case InlineObservation::CALLEE_ARG_FEEDS_RANGE_CHECK:
- // LegacyPolicy ignores this for prejit roots.
+ // DefaultPolicy ignores this for prejit roots.
if (!m_IsPrejitRoot)
{
m_ArgFeedsRangeCheck++;
@@ -300,7 +292,7 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
case InlineObservation::CALLEE_HAS_SWITCH:
case InlineObservation::CALLEE_UNSUPPORTED_OPCODE:
- // LegacyPolicy ignores these for prejit roots.
+ // DefaultPolicy ignores these for prejit roots.
if (!m_IsPrejitRoot)
{
// Pass these on, they should cause inlining to fail.
@@ -383,10 +375,52 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
break;
}
+ case InlineObservation::CALLSITE_IN_TRY_REGION:
+ m_CallsiteIsInTryRegion = true;
+ break;
+
+ case InlineObservation::CALLSITE_IN_LOOP:
+ m_CallsiteIsInLoop = true;
+ break;
+
+ case InlineObservation::CALLEE_DOES_NOT_RETURN:
+ m_IsNoReturn = value;
+ m_IsNoReturnKnown = true;
+ break;
+
+ case InlineObservation::CALLSITE_RARE_GC_STRUCT:
+ // If this is a discretionary or always inline candidate
+ // with a gc struct, we may change our mind about inlining
+ // if the call site is rare, to avoid costs associated with
+ // zeroing the GC struct up in the root prolog.
+ if (m_Observation == InlineObservation::CALLEE_BELOW_ALWAYS_INLINE_SIZE)
+ {
+ assert(m_CallsiteFrequency == InlineCallsiteFrequency::UNUSED);
+ SetFailure(obs);
+ return;
+ }
+ else if (m_Observation == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE)
+ {
+ assert(m_CallsiteFrequency == InlineCallsiteFrequency::RARE);
+ SetFailure(obs);
+ return;
+ }
+ break;
+
case InlineObservation::CALLEE_HAS_PINNED_LOCALS:
- // The legacy policy is to never inline methods with
- // pinned locals.
- SetNever(obs);
+ if (m_CallsiteIsInTryRegion)
+ {
+ // Inlining a method with pinned locals in a try
+ // region requires wrapping the inline body in a
+ // try/finally to ensure unpinning. Bail instead.
+ SetFailure(InlineObservation::CALLSITE_PIN_IN_TRY_REGION);
+ return;
+ }
+ break;
+
+ case InlineObservation::CALLEE_HAS_LOCALLOC:
+ // We see this during the IL prescan. Ignore it for now; we will
+ // bail out during importation, if necessary
break;
default:
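
The relaxed pinned-locals rule still refuses one case: inlining a pinning callee into a try region, where correct unpinning would force an extra try/finally around the inlined body. A C# sketch of the rejected shape, compiled with /unsafe (illustrative only):

    using System;

    class PinnedLocalCallee
    {
        static unsafe int FirstByte(byte[] data)
        {
            fixed (byte* p = data) // pinned local in the callee
            {
                return p[0];
            }
        }

        static int Caller(byte[] data)
        {
            try
            {
                return FirstByte(data); // CALLSITE_PIN_IN_TRY_REGION if inlined here
            }
            finally
            {
                Console.WriteLine("done");
            }
        }

        static void Main() => Console.WriteLine(Caller(new byte[] { 7 }));
    }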
@@ -408,7 +442,7 @@ void LegacyPolicy::NoteBool(InlineObservation obs, bool value)
// obs - the current observation
// value - the value being observed
-void LegacyPolicy::NoteInt(InlineObservation obs, int value)
+void DefaultPolicy::NoteInt(InlineObservation obs, int value)
{
switch (obs)
{
@@ -430,10 +464,23 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
{
assert(m_IsForceInlineKnown);
assert(value != 0);
+ assert(m_IsNoReturnKnown);
+
+ //
+ // Let's be conservative for now and reject inlining of "no return" methods only
+ // if the callee contains a single basic block. This covers most of the use cases
+ // (typical throw helpers simply do "throw new X();" and so they have a single block)
+ // without affecting more exotic cases (loops that do actual work for example) where
+ // failure to inline could negatively impact code quality.
+ //
unsigned basicBlockCount = static_cast<unsigned>(value);
- if (!m_IsForceInline && (basicBlockCount > MAX_BASIC_BLOCKS))
+ if (m_IsNoReturn && (basicBlockCount == 1))
+ {
+ SetNever(InlineObservation::CALLEE_DOES_NOT_RETURN);
+ }
+ else if (!m_IsForceInline && (basicBlockCount > MAX_BASIC_BLOCKS))
{
SetNever(InlineObservation::CALLEE_TOO_MANY_BASIC_BLOCKS);
}
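
The single-basic-block test is aimed squarely at throw helpers. A C# sketch (illustrative only):

    using System;

    class ThrowHelperExample
    {
        // One basic block, no return path: marked CALLEE_DOES_NOT_RETURN,
        // and with basicBlockCount == 1 the policy declines to inline it.
        static void ThrowArgumentNull(string name) => throw new ArgumentNullException(name);

        static int Measure(string s)
        {
            if (s == null)
                ThrowArgumentNull(nameof(s)); // stays a call on the cold path
            return s.Length;
        }

        static void Main() => Console.WriteLine(Measure("hello"));
    }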
@@ -542,7 +589,7 @@ void LegacyPolicy::NoteInt(InlineObservation obs, int value)
// Notes: uses the accumulated set of observations to compute a
// profitability boost for the inline candidate.
-double LegacyPolicy::DetermineMultiplier()
+double DefaultPolicy::DetermineMultiplier()
{
double multiplier = 0;
@@ -664,7 +711,7 @@ double LegacyPolicy::DetermineMultiplier()
// candidates. Should not be needed for forced or always
// candidates.
-int LegacyPolicy::DetermineNativeSizeEstimate()
+int DefaultPolicy::DetermineNativeSizeEstimate()
{
// Should be a discretionary candidate.
assert(m_StateMachine != nullptr);
@@ -684,7 +731,7 @@ int LegacyPolicy::DetermineNativeSizeEstimate()
// call site. While the quality of the estimate here is questionable
// (especially for x64) it is being left as is for legacy compatibility.
-int LegacyPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methInfo)
+int DefaultPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methInfo)
{
int callsiteSize = 55; // Direct call takes 5 native bytes; indirect call takes 6 native bytes.
@@ -747,7 +794,7 @@ int LegacyPolicy::DetermineCallsiteNativeSizeEstimate(CORINFO_METHOD_INFO* methI
// candidates, since it does not make sense to do this assessment for
// failed, always, or forced inlines.
-void LegacyPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
+void DefaultPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
{
#if defined(DEBUG)
@@ -772,7 +819,7 @@ void LegacyPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
m_Multiplier = DetermineMultiplier();
const int threshold = (int)(m_CallsiteNativeSizeEstimate * m_Multiplier);
- // Note the LegacyPolicy estimates are scaled up by SIZE_SCALE
+ // Note the DefaultPolicy estimates are scaled up by SIZE_SCALE
JITDUMP("\ncalleeNativeSizeEstimate=%d\n", m_CalleeNativeSizeEstimate)
JITDUMP("callsiteNativeSizeEstimate=%d\n", m_CallsiteNativeSizeEstimate);
JITDUMP("benefit multiplier=%g\n", m_Multiplier);
@@ -828,11 +875,11 @@ void LegacyPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// not). For always or force inlines the legacy policy doesn't
// estimate size impact.
-int LegacyPolicy::CodeSizeEstimate()
+int DefaultPolicy::CodeSizeEstimate()
{
if (m_StateMachine != nullptr)
{
- // This is not something the LegacyPolicy explicitly computed,
+ // This is not something the DefaultPolicy explicitly computed,
// since it uses a blended evaluation model (mixing size and time
// together for overall profitability). But it's effectively an
// estimate of the size impact.
@@ -845,118 +892,10 @@ int LegacyPolicy::CodeSizeEstimate()
}
//------------------------------------------------------------------------
-// NoteBool: handle a boolean observation with non-fatal impact
-//
-// Arguments:
-// obs - the current observation
-// value - the value of the observation
-
-void EnhancedLegacyPolicy::NoteBool(InlineObservation obs, bool value)
-{
-
-#ifdef DEBUG
- // Check the impact
- InlineImpact impact = InlGetImpact(obs);
-
- // As a safeguard, all fatal impact must be
- // reported via NoteFatal.
- assert(impact != InlineImpact::FATAL);
-#endif // DEBUG
-
- switch (obs)
- {
- case InlineObservation::CALLEE_DOES_NOT_RETURN:
- m_IsNoReturn = value;
- m_IsNoReturnKnown = true;
- break;
-
- case InlineObservation::CALLSITE_RARE_GC_STRUCT:
- // If this is a discretionary or always inline candidate
- // with a gc struct, we may change our mind about inlining
- // if the call site is rare, to avoid costs associated with
- // zeroing the GC struct up in the root prolog.
- if (m_Observation == InlineObservation::CALLEE_BELOW_ALWAYS_INLINE_SIZE)
- {
- assert(m_CallsiteFrequency == InlineCallsiteFrequency::UNUSED);
- SetFailure(obs);
- return;
- }
- else if (m_Observation == InlineObservation::CALLEE_IS_DISCRETIONARY_INLINE)
- {
- assert(m_CallsiteFrequency == InlineCallsiteFrequency::RARE);
- SetFailure(obs);
- return;
- }
- break;
-
- case InlineObservation::CALLEE_HAS_PINNED_LOCALS:
- if (m_CallsiteIsInTryRegion)
- {
- // Inlining a method with pinned locals in a try
- // region requires wrapping the inline body in a
- // try/finally to ensure unpinning. Bail instead.
- SetFailure(InlineObservation::CALLSITE_PIN_IN_TRY_REGION);
- return;
- }
- break;
-
- default:
- // Pass all other information to the legacy policy
- LegacyPolicy::NoteBool(obs, value);
- break;
- }
-}
-
-//------------------------------------------------------------------------
-// NoteInt: handle an observed integer value
-//
-// Arguments:
-// obs - the current obsevation
-// value - the value being observed
-
-void EnhancedLegacyPolicy::NoteInt(InlineObservation obs, int value)
-{
- switch (obs)
- {
- case InlineObservation::CALLEE_NUMBER_OF_BASIC_BLOCKS:
- {
- assert(value != 0);
- assert(m_IsNoReturnKnown);
-
- //
- // Let's be conservative for now and reject inlining of "no return" methods only
- // if the callee contains a single basic block. This covers most of the use cases
- // (typical throw helpers simply do "throw new X();" and so they have a single block)
- // without affecting more exotic cases (loops that do actual work for example) where
- // failure to inline could negatively impact code quality.
- //
-
- unsigned basicBlockCount = static_cast<unsigned>(value);
-
- if (m_IsNoReturn && (basicBlockCount == 1))
- {
- SetNever(InlineObservation::CALLEE_DOES_NOT_RETURN);
- }
- else
- {
- LegacyPolicy::NoteInt(obs, value);
- }
-
- break;
- }
-
- default:
- // Pass all other information to the legacy policy
- LegacyPolicy::NoteInt(obs, value);
- break;
- }
-}
-
-//------------------------------------------------------------------------
// PropagateNeverToRuntime: determine if a never result should cause the
// method to be marked as un-inlinable.
-bool EnhancedLegacyPolicy::PropagateNeverToRuntime() const
+bool DefaultPolicy::PropagateNeverToRuntime() const
{
//
// Do not propagate the "no return" observation. If we do this then future inlining
@@ -969,8 +908,6 @@ bool EnhancedLegacyPolicy::PropagateNeverToRuntime() const
bool propagate = (m_Observation != InlineObservation::CALLEE_DOES_NOT_RETURN);
- propagate &= LegacyPolicy::PropagateNeverToRuntime();
-
return propagate;
}
@@ -1065,7 +1002,7 @@ void RandomPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
}
// Use a probability curve that roughly matches the observed
- // behavior of the LegacyPolicy. That way we're inlining
+ // behavior of the DefaultPolicy. That way we're inlining
// differently but not creating enormous methods.
//
// We vary a bit at the extremes. The RandomPolicy won't always
@@ -1158,7 +1095,7 @@ void RandomPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo)
// clang-format off
DiscretionaryPolicy::DiscretionaryPolicy(Compiler* compiler, bool isPrejitRoot)
- : EnhancedLegacyPolicy(compiler, isPrejitRoot)
+ : DefaultPolicy(compiler, isPrejitRoot)
, m_Depth(0)
, m_BlockCount(0)
, m_Maxstack(0)
@@ -1266,7 +1203,7 @@ void DiscretionaryPolicy::NoteBool(InlineObservation obs, bool value)
break;
default:
- EnhancedLegacyPolicy::NoteBool(obs, value);
+ DefaultPolicy::NoteBool(obs, value);
break;
}
}
@@ -1309,7 +1246,7 @@ void DiscretionaryPolicy::NoteInt(InlineObservation obs, int value)
// on similarity of impact on codegen.
OPCODE opcode = static_cast<OPCODE>(value);
ComputeOpcodeBin(opcode);
- EnhancedLegacyPolicy::NoteInt(obs, value);
+ DefaultPolicy::NoteInt(obs, value);
break;
}
@@ -1331,7 +1268,7 @@ void DiscretionaryPolicy::NoteInt(InlineObservation obs, int value)
default:
// Delegate remainder to the super class.
- EnhancedLegacyPolicy::NoteInt(obs, value);
+ DefaultPolicy::NoteInt(obs, value);
break;
}
}
@@ -1647,7 +1584,7 @@ void DiscretionaryPolicy::DetermineProfitability(CORINFO_METHOD_INFO* methodInfo
EstimatePerformanceImpact();
// Delegate to super class for the rest
- EnhancedLegacyPolicy::DetermineProfitability(methodInfo);
+ DefaultPolicy::DetermineProfitability(methodInfo);
}
//------------------------------------------------------------------------
diff --git a/src/jit/inlinepolicy.h b/src/jit/inlinepolicy.h
index 3239dcbe89..0ff0b33279 100644
--- a/src/jit/inlinepolicy.h
+++ b/src/jit/inlinepolicy.h
@@ -10,9 +10,8 @@
// -- CLASSES --
//
// LegalPolicy - partial class providing common legality checks
-// LegacyPolicy - policy that provides legacy inline behavior
-// EnhancedLegacyPolicy - legacy variant with some enhancements
-// DiscretionaryPolicy - legacy variant with uniform size policy
+// DefaultPolicy - default inliner policy
+// DiscretionaryPolicy - default variant with uniform size policy
// ModelPolicy - policy based on statistical modelling
//
// These experimental policies are available only in
@@ -22,7 +21,7 @@
// FullPolicy - inlines everything up to size and depth limits
// SizePolicy - tries not to increase method sizes
//
-// The default policy in use is the EnhancedLegacyPolicy.
+// The default policy in use is the DefaultPolicy.
#ifndef _INLINE_POLICY_H_
#define _INLINE_POLICY_H_
@@ -66,18 +65,17 @@ protected:
};
// Forward declaration for the state machine class used by the
-// LegacyPolicy
+// DefaultPolicy
class CodeSeqSM;
-// LegacyPolicy implements the inlining policy used by the jit in its
-// initial release.
+// DefaultPolicy implements the default inlining policy for the jit.
-class LegacyPolicy : public LegalPolicy
+class DefaultPolicy : public LegalPolicy
{
public:
- // Construct a LegacyPolicy
- LegacyPolicy(Compiler* compiler, bool isPrejitRoot)
+ // Construct a DefaultPolicy
+ DefaultPolicy(Compiler* compiler, bool isPrejitRoot)
: LegalPolicy(isPrejitRoot)
, m_RootCompiler(compiler)
, m_StateMachine(nullptr)
@@ -99,6 +97,9 @@ public:
, m_LooksLikeWrapperMethod(false)
, m_MethodIsMostlyLoadStore(false)
, m_CallsiteIsInTryRegion(false)
+ , m_CallsiteIsInLoop(false)
+ , m_IsNoReturn(false)
+ , m_IsNoReturnKnown(false)
{
// empty
}
@@ -112,14 +113,7 @@ public:
void DetermineProfitability(CORINFO_METHOD_INFO* methodInfo) override;
// Policy policies
- bool PropagateNeverToRuntime() const override
- {
- return true;
- }
- bool IsLegacyPolicy() const override
- {
- return true;
- }
+ bool PropagateNeverToRuntime() const override;
// Policy estimates
int CodeSizeEstimate() override;
@@ -128,7 +122,7 @@ public:
const char* GetName() const override
{
- return "LegacyPolicy";
+ return "DefaultPolicy";
}
#endif // (DEBUG) || defined(INLINE_DATA)
@@ -167,46 +161,19 @@ protected:
bool m_LooksLikeWrapperMethod : 1;
bool m_MethodIsMostlyLoadStore : 1;
bool m_CallsiteIsInTryRegion : 1;
+ bool m_CallsiteIsInLoop : 1;
+ bool m_IsNoReturn : 1;
+ bool m_IsNoReturnKnown : 1;
};
-// EnhancedLegacyPolicy extends the legacy policy by rejecting
-// inlining of methods that never return because they throw.
-
-class EnhancedLegacyPolicy : public LegacyPolicy
-{
-public:
- EnhancedLegacyPolicy(Compiler* compiler, bool isPrejitRoot)
- : LegacyPolicy(compiler, isPrejitRoot), m_IsNoReturn(false), m_IsNoReturnKnown(false)
- {
- // empty
- }
-
- // Policy observations
- void NoteBool(InlineObservation obs, bool value) override;
- void NoteInt(InlineObservation obs, int value) override;
-
- // Policy policies
- bool PropagateNeverToRuntime() const override;
- bool IsLegacyPolicy() const override
- {
- return false;
- }
-
-protected:
- // Data members
- bool m_IsNoReturn : 1;
- bool m_IsNoReturnKnown : 1;
-};
-
-// DiscretionaryPolicy is a variant of the enhanced legacy policy. It
+// DiscretionaryPolicy is a variant of the default policy. It
// differs in that there is no ALWAYS_INLINE class, there is no IL
-// size limit, it does not try and maintain legacy compatabilty, and
-// in prejit mode, discretionary failures do not set the "NEVER"
-// inline bit.
+// size limit, and in prejit mode, discretionary failures do not
+// propagate the "NEVER" inline bit to the runtime.
//
// It is useful for gathering data about inline costs.
-class DiscretionaryPolicy : public EnhancedLegacyPolicy
+class DiscretionaryPolicy : public DefaultPolicy
{
public:
// Construct a DiscretionaryPolicy
diff --git a/src/jit/jitconfigvalues.h b/src/jit/jitconfigvalues.h
index b978e4a7df..bb764f4e50 100644
--- a/src/jit/jitconfigvalues.h
+++ b/src/jit/jitconfigvalues.h
@@ -101,6 +101,7 @@ CONFIG_INTEGER(JitPrintInlinedMethods, W("JitPrintInlinedMethods"), 0)
CONFIG_INTEGER(JitPrintDevirtualizedMethods, W("JitPrintDevirtualizedMethods"), 0)
CONFIG_INTEGER(JitRequired, W("JITRequired"), -1)
CONFIG_INTEGER(JitRoundFloat, W("JITRoundFloat"), DEFAULT_ROUND_LEVEL)
+CONFIG_INTEGER(JitStackAllocToLocalSize, W("JitStackAllocToLocalSize"), DEFAULT_MAX_LOCALLOC_TO_LOCAL_SIZE)
CONFIG_INTEGER(JitSkipArrayBoundCheck, W("JitSkipArrayBoundCheck"), 0)
CONFIG_INTEGER(JitSlowDebugChecksEnabled, W("JitSlowDebugChecksEnabled"), 1) // Turn on slow debug checks
CONFIG_INTEGER(JitSplitFunctionSize, W("JitSplitFunctionSize"), 0) // On ARM, use this as the maximum function/funclet
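
In DEBUG builds the importer reads this value through JitConfig, so the 32-byte default can presumably be overridden through the usual COMPlus_ environment-variable mechanism (e.g. COMPlus_JitStackAllocToLocalSize=64); in release builds the default is baked in.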
@@ -322,7 +323,6 @@ CONFIG_STRING(JitNoInlineRange, W("JitNoInlineRange"))
CONFIG_STRING(JitInlineReplayFile, W("JitInlineReplayFile"))
#endif // defined(DEBUG) || defined(INLINE_DATA)
-CONFIG_INTEGER(JitInlinePolicyLegacy, W("JitInlinePolicyLegacy"), 0)
CONFIG_INTEGER(JitInlinePolicyModel, W("JitInlinePolicyModel"), 0)
CONFIG_INTEGER(JitEECallTimingInfo, W("JitEECallTimingInfo"), 0)
diff --git a/src/jit/lowerarmarch.cpp b/src/jit/lowerarmarch.cpp
index 9f73fb47e1..2213960c11 100644
--- a/src/jit/lowerarmarch.cpp
+++ b/src/jit/lowerarmarch.cpp
@@ -777,7 +777,10 @@ void Lowering::ContainCheckSIMD(GenTreeSIMD* simdNode)
op2 = simdNode->gtOp.gtOp2;
// If the index is a constant, mark it as contained.
- CheckImmedAndMakeContained(simdNode, op2);
+ if (op2->IsCnsIntOrI())
+ {
+ MakeSrcContained(simdNode, op2);
+ }
break;
}
diff --git a/src/jit/lsraarm64.cpp b/src/jit/lsraarm64.cpp
index cf00f6d17c..346115fba9 100644
--- a/src/jit/lsraarm64.cpp
+++ b/src/jit/lsraarm64.cpp
@@ -812,7 +812,21 @@ void LinearScan::TreeNodeInfoInitSIMD(GenTreeSIMD* simdTree)
break;
case SIMDIntrinsicGetItem:
- info->srcCount = 1;
+ // We have an object and an item, which may be contained.
+ info->srcCount = simdTree->gtGetOp2()->isContained() ? 1 : 2;
+
+ if (!simdTree->gtGetOp2()->IsCnsIntOrI())
+ {
+ // If the index is not a constant, we will need a general purpose register
+ info->internalIntCount = 1;
+
+ // If the index is not a constant, we will use the SIMD temp location to store the vector.
+ compiler->getSIMDInitTempVarNum();
+
+ // internal register must not clobber input index
+ simdTree->gtOp.gtOp2->gtLsraInfo.isDelayFree = true;
+ info->hasDelayFreeSrc = true;
+ }
break;
case SIMDIntrinsicAdd:
diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
index 8519f42fdd..a881d34dfb 100644
--- a/src/jit/morph.cpp
+++ b/src/jit/morph.cpp
@@ -8272,7 +8272,7 @@ GenTreePtr Compiler::fgMorphCall(GenTreeCall* call)
{
szFailReason = "Needs security check";
}
- else if (compLocallocUsed)
+ else if (compLocallocUsed || compLocallocOptimized)
{
szFailReason = "Localloc used";
}
@@ -16882,7 +16882,7 @@ void Compiler::fgMorphBlocks()
GenTreePtr last = (block->bbTreeList != nullptr) ? block->bbTreeList->gtPrev : nullptr;
GenTreePtr ret = (last != nullptr) ? last->gtStmt.gtStmtExpr : nullptr;
- if ((ret != nullptr) && ((ret->gtFlags & GTF_RET_MERGED) != 0))
+ if ((ret != nullptr) && (ret->OperGet() == GT_RETURN) && ((ret->gtFlags & GTF_RET_MERGED) != 0))
{
// This return was generated during epilog merging, so leave it alone
}
diff --git a/src/jit/optimizer.cpp b/src/jit/optimizer.cpp
index 04285916ff..87221fde58 100644
--- a/src/jit/optimizer.cpp
+++ b/src/jit/optimizer.cpp
@@ -2832,6 +2832,7 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
// multiple times while canonicalizing multiple loop nests, we'll attempt to redirect a predecessor multiple times.
// This is ok, because after the first redirection, the topPredBlock branch target will no longer match the source
// edge of the blockMap, so nothing will happen.
+ bool firstPred = true;
for (flowList* topPred = t->bbPreds; topPred != nullptr; topPred = topPred->flNext)
{
BasicBlock* topPredBlock = topPred->flBlock;
@@ -2851,6 +2852,29 @@ bool Compiler::optCanonicalizeLoop(unsigned char loopInd)
JITDUMP("in optCanonicalizeLoop: redirect top predecessor BB%02u to BB%02u\n", topPredBlock->bbNum,
newT->bbNum);
optRedirectBlock(topPredBlock, blockMap);
+
+ // When we have profile data then the 'newT' block will inherit topPredBlock profile weight
+ if (topPredBlock->hasProfileWeight())
+ {
+ // This corrects an issue when the topPredBlock has a profile based weight
+ //
+ if (firstPred)
+ {
+ JITDUMP("in optCanonicalizeLoop: block BB%02u will inheritWeight from BB%02u\n", newT->bbNum,
+ topPredBlock->bbNum);
+
+ newT->inheritWeight(topPredBlock);
+ firstPred = false;
+ }
+ else
+ {
+ JITDUMP("in optCanonicalizeLoop: block BB%02u will also contribute to the weight of BB%02u\n",
+ newT->bbNum, topPredBlock->bbNum);
+
+ BasicBlock::weight_t newWeight = newT->getBBWeight(this) + topPredBlock->getBBWeight(this);
+ newT->setBBWeight(newWeight);
+ }
+ }
}
assert(newT->bbNext == f);
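
A worked example of the new accumulation: if three profiled predecessors with weights 300, 120, and 80 are redirected into newT, the first sets newT's weight to 300 via inheritWeight, and the remaining two add on top, giving 300 + 120 + 80 = 500, rather than leaving newT at whatever default weight a freshly inserted block carries.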
diff --git a/src/jit/simd.cpp b/src/jit/simd.cpp
index def834b76a..490d1369f8 100644
--- a/src/jit/simd.cpp
+++ b/src/jit/simd.cpp
@@ -1364,8 +1364,14 @@ GenTreePtr Compiler::impSIMDSelect(
GenTree* andExpr = gtNewSIMDNode(simdType, op2, tmp, SIMDIntrinsicBitwiseAnd, baseType, size);
GenTree* dupOp1 = gtCloneExpr(tmp);
assert(dupOp1 != nullptr);
+#ifdef _TARGET_ARM64_
+ // ARM64 implements SIMDIntrinsicBitwiseAndNot as Left & ~Right
+ GenTree* andNotExpr = gtNewSIMDNode(simdType, op3, dupOp1, SIMDIntrinsicBitwiseAndNot, baseType, size);
+#else
+ // XARCH implements SIMDIntrinsicBitwiseAndNot as ~Left & Right
GenTree* andNotExpr = gtNewSIMDNode(simdType, dupOp1, op3, SIMDIntrinsicBitwiseAndNot, baseType, size);
- GenTree* simdTree = gtNewSIMDNode(simdType, andExpr, andNotExpr, SIMDIntrinsicBitwiseOr, baseType, size);
+#endif
+ GenTree* simdTree = gtNewSIMDNode(simdType, andExpr, andNotExpr, SIMDIntrinsicBitwiseOr, baseType, size);
// If asg not null, create a GT_COMMA tree.
if (asg != nullptr)
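
The tree being built is the classic bitwise select, (mask & op2) | (op3 & ~mask); the #ifdef only swaps AndNot's operand order to match each ISA (ARM64 BIC computes Left & ~Right, while x86 ANDNPS computes ~Left & Right). A scalar C# sketch of the same identity (illustrative only):

    using System;

    class BitwiseSelect
    {
        // Select(mask, a, b): bits of a where mask is 1, bits of b where mask is 0.
        static uint SelectArm64(uint mask, uint a, uint b)
            => (mask & a) | (b & ~mask);  // AndNot(Left=b, Right=mask) = b & ~mask

        static uint SelectXarch(uint mask, uint a, uint b)
            => (mask & a) | (~mask & b);  // AndNot(Left=mask, Right=b) = ~mask & b

        static void Main()
        {
            uint mask = 0xFF00FF00, a = 0x11111111, b = 0x22222222;
            // Both operand orders compute the same selection.
            Console.WriteLine(SelectArm64(mask, a, b) == SelectXarch(mask, a, b)); // True
        }
    }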
diff --git a/src/mscorlib/System.Private.CoreLib.csproj b/src/mscorlib/System.Private.CoreLib.csproj
index c78274ee5a..72368f0c08 100644
--- a/src/mscorlib/System.Private.CoreLib.csproj
+++ b/src/mscorlib/System.Private.CoreLib.csproj
@@ -594,7 +594,6 @@
<Compile Include="$(BclSourcesRoot)\System\Nullable.cs" />
<Compile Include="$(BclSourcesRoot)\System\Collections\Generic\Comparer.cs" />
<Compile Include="$(BclSourcesRoot)\System\Collections\Generic\ComparerHelpers.cs" />
- <Compile Include="$(BclSourcesRoot)\System\Collections\Generic\Dictionary.cs" />
<Compile Include="$(BclSourcesRoot)\System\Collections\Generic\EqualityComparer.cs" />
<Compile Include="$(BclSourcesRoot)\System\Collections\Generic\ArraySortHelper.cs" />
<Compile Include="$(BclSourcesRoot)\System\Collections\ObjectModel\ReadOnlyDictionary.cs" />
diff --git a/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems b/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems
index 90a656f52c..5b7629f245 100644
--- a/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems
+++ b/src/mscorlib/shared/System.Private.CoreLib.Shared.projitems
@@ -53,6 +53,7 @@
<Compile Include="$(MSBuildThisFileDirectory)System\CharEnumerator.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\CLSCompliantAttribute.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\DictionaryEntry.cs" />
+ <Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\Dictionary.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\ICollection.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\ICollectionDebugView.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\IComparer.cs" />
@@ -67,7 +68,9 @@
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\IReadOnlyList.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\KeyNotFoundException.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\KeyValuePair.cs" />
+ <Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\NonRandomizedStringEqualityComparer.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\Generic\List.cs" />
+ <Compile Include="$(MSBuildThisFileDirectory)System\Collections\HashHelpers.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\ICollection.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\IComparer.cs" />
<Compile Include="$(MSBuildThisFileDirectory)System\Collections\IDictionary.cs" />
diff --git a/src/mscorlib/src/System/Collections/Generic/Dictionary.cs b/src/mscorlib/shared/System/Collections/Generic/Dictionary.cs
index 761f775905..5b576973aa 100644
--- a/src/mscorlib/src/System/Collections/Generic/Dictionary.cs
+++ b/src/mscorlib/shared/System/Collections/Generic/Dictionary.cs
@@ -2,31 +2,14 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
-/*============================================================
-**
-**
-**
-**
-** Purpose: Generic hash table implementation
-**
-** #DictionaryVersusHashtableThreadSafety
-** Hashtable has multiple reader/single writer (MR/SW) thread safety built into
-** certain methods and properties, whereas Dictionary doesn't. If you're
-** converting framework code that formerly used Hashtable to Dictionary, it's
-** important to consider whether callers may have taken a dependence on MR/SW
-** thread safety. If a reader writer lock is available, then that may be used
-** with a Dictionary to get the same thread safety guarantee.
-**
-===========================================================*/
+using System;
+using System.Collections;
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+using System.Runtime.Serialization;
namespace System.Collections.Generic
{
- using System;
- using System.Collections;
- using System.Diagnostics;
- using System.Runtime.CompilerServices;
- using System.Runtime.Serialization;
-
/// <summary>
/// Used internally to control behavior of insertion into a <see cref="Dictionary{TKey, TValue}"/>.
/// </summary>
@@ -51,7 +34,7 @@ namespace System.Collections.Generic
[DebuggerTypeProxy(typeof(IDictionaryDebugView<,>))]
[DebuggerDisplay("Count = {Count}")]
[Serializable]
- [System.Runtime.CompilerServices.TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
+ [System.Runtime.CompilerServices.TypeForwardedFrom("mscorlib, Version=4.0.0.0, Culture=neutral, PublicKeyToken=b77a5c561934e089")]
public class Dictionary<TKey, TValue> : IDictionary<TKey, TValue>, IDictionary, IReadOnlyDictionary<TKey, TValue>, ISerializable, IDeserializationCallback
{
private struct Entry
@@ -71,13 +54,13 @@ namespace System.Collections.Generic
private IEqualityComparer<TKey> comparer;
private KeyCollection keys;
private ValueCollection values;
- private Object _syncRoot;
+ private object _syncRoot;
// constants for serialization
- private const String VersionName = "Version"; // Do not rename (binary serialization)
- private const String HashSizeName = "HashSize"; // Do not rename (binary serialization). Must save buckets.Length
- private const String KeyValuePairsName = "KeyValuePairs"; // Do not rename (binary serialization)
- private const String ComparerName = "Comparer"; // Do not rename (binary serialization)
+ private const string VersionName = "Version"; // Do not rename (binary serialization)
+ private const string HashSizeName = "HashSize"; // Do not rename (binary serialization). Must save buckets.Length
+ private const string KeyValuePairsName = "KeyValuePairs"; // Do not rename (binary serialization)
+ private const string ComparerName = "Comparer"; // Do not rename (binary serialization)
public Dictionary() : this(0, null) { }
@@ -132,9 +115,7 @@ namespace System.Collections.Generic
}
}
- public Dictionary(IEnumerable<KeyValuePair<TKey, TValue>> collection) :
- this(collection, null)
- { }
+ public Dictionary(IEnumerable<KeyValuePair<TKey, TValue>> collection) : this(collection, null) { }
public Dictionary(IEnumerable<KeyValuePair<TKey, TValue>> collection, IEqualityComparer<TKey> comparer) :
this((collection as ICollection<KeyValuePair<TKey, TValue>>)?.Count ?? 0, comparer)
@@ -152,9 +133,9 @@ namespace System.Collections.Generic
protected Dictionary(SerializationInfo info, StreamingContext context)
{
- //We can't do anything with the keys and values until the entire graph has been deserialized
- //and we have a resonable estimate that GetHashCode is not going to fail. For the time being,
- //we'll just cache this. The graph is not valid until OnDeserialization has been called.
+ // We can't do anything with the keys and values until the entire graph has been deserialized
+ // and we have a reasonable estimate that GetHashCode is not going to fail. For the time being,
+ // we'll just cache this. The graph is not valid until OnDeserialization has been called.
HashHelpers.SerializationInfoTable.Add(this, info);
}
@@ -355,12 +336,14 @@ namespace System.Collections.Generic
{
ThrowHelper.ThrowArgumentNullException(ExceptionArgument.info);
}
+
info.AddValue(VersionName, version);
info.AddValue(ComparerName, comparer, typeof(IEqualityComparer<TKey>));
- info.AddValue(HashSizeName, buckets == null ? 0 : buckets.Length); //This is the length of the bucket array.
+ info.AddValue(HashSizeName, buckets == null ? 0 : buckets.Length); // This is the length of the bucket array
+
if (buckets != null)
{
- KeyValuePair<TKey, TValue>[] array = new KeyValuePair<TKey, TValue>[Count];
+ var array = new KeyValuePair<TKey, TValue>[Count];
CopyTo(array, 0);
info.AddValue(KeyValuePairsName, array, typeof(KeyValuePair<TKey, TValue>[]));
}
@@ -402,7 +385,7 @@ namespace System.Collections.Generic
if (buckets == null) Initialize(0);
int hashCode = comparer.GetHashCode(key) & 0x7FFFFFFF;
- int targetBucket = hashCode % buckets.Length;
+ int targetBucket = hashCode % buckets.Length;
int collisionCount = 0;
for (int i = buckets[targetBucket]; i >= 0; i = entries[i].next)
@@ -423,9 +406,9 @@ namespace System.Collections.Generic
return false;
}
-
collisionCount++;
}
+
int index;
if (freeCount > 0)
{
@@ -454,7 +437,7 @@ namespace System.Collections.Generic
// If we hit the collision threshold we'll need to switch to the comparer which is using randomized string hashing
// i.e. EqualityComparer<string>.Default.
- if (collisionCount > HashHelpers.HashCollisionThreshold && comparer == NonRandomizedStringEqualityComparer.Default)
+ if (collisionCount > HashHelpers.HashCollisionThreshold && comparer is NonRandomizedStringEqualityComparer)
{
comparer = (IEqualityComparer<TKey>)EqualityComparer<string>.Default;
Resize(entries.Length, true);
@@ -463,17 +446,15 @@ namespace System.Collections.Generic
return true;
}
- public virtual void OnDeserialization(Object sender)
+ public virtual void OnDeserialization(object sender)
{
SerializationInfo siInfo;
HashHelpers.SerializationInfoTable.TryGetValue(this, out siInfo);
if (siInfo == null)
{
- // It might be necessary to call OnDeserialization from a container if the container object also implements
- // OnDeserialization. However, remoting will call OnDeserialization again.
// We can return immediately if this function is called twice.
- // Note we set remove the serialization info from the table at the end of this method.
+ // Note we remove the serialization info from the table at the end of this method.
return;
}
@@ -526,6 +507,7 @@ namespace System.Collections.Generic
for (int i = 0; i < newBuckets.Length; i++) newBuckets[i] = -1;
Entry[] newEntries = new Entry[newSize];
Array.Copy(entries, 0, newEntries, 0, count);
+
if (forceNewHashCodes)
{
for (int i = 0; i < count; i++)
@@ -536,6 +518,7 @@ namespace System.Collections.Generic
}
}
}
+
for (int i = 0; i < count; i++)
{
if (newEntries[i].hashCode >= 0)
@@ -545,6 +528,7 @@ namespace System.Collections.Generic
newBuckets[bucket] = i;
}
}
+
buckets = newBuckets;
entries = newEntries;
}
@@ -672,6 +656,7 @@ namespace System.Collections.Generic
value = default(TValue);
return false;
}
+
public bool TryAdd(TKey key, TValue value) => TryInsert(key, value, InsertionBehavior.None);
bool ICollection<KeyValuePair<TKey, TValue>>.IsReadOnly
@@ -1170,7 +1155,7 @@ namespace System.Collections.Generic
get { return false; }
}
- Object ICollection.SyncRoot
+ object ICollection.SyncRoot
{
get { return ((ICollection)dictionary).SyncRoot; }
}
@@ -1225,7 +1210,7 @@ namespace System.Collections.Generic
}
}
- Object System.Collections.IEnumerator.Current
+ object System.Collections.IEnumerator.Current
{
get
{
@@ -1396,7 +1381,7 @@ namespace System.Collections.Generic
get { return false; }
}
- Object ICollection.SyncRoot
+ object ICollection.SyncRoot
{
get { return ((ICollection)dictionary).SyncRoot; }
}
@@ -1450,7 +1435,7 @@ namespace System.Collections.Generic
}
}
- Object System.Collections.IEnumerator.Current
+ object System.Collections.IEnumerator.Current
{
get
{
diff --git a/src/mscorlib/shared/System/Collections/Generic/NonRandomizedStringEqualityComparer.cs b/src/mscorlib/shared/System/Collections/Generic/NonRandomizedStringEqualityComparer.cs
new file mode 100644
index 0000000000..ef44fefc8e
--- /dev/null
+++ b/src/mscorlib/shared/System/Collections/Generic/NonRandomizedStringEqualityComparer.cs
@@ -0,0 +1,38 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System.Runtime.Serialization;
+
+namespace System.Collections.Generic
+{
+ // NonRandomizedStringEqualityComparer is the comparer used by default with the Dictionary<string,...>
+ // We use NonRandomizedStringEqualityComparer as the default comparer because it doesn't use randomized string hashing;
+ // this keeps performance unaffected until we hit the collision threshold, at which point we switch to the comparer
+ // that uses randomized string hashing.
+ [Serializable] // Required for compatibility with .NET Core 2.0 as we exposed the NonRandomizedStringEqualityComparer inside the serialization blob
+#if CORECLR
+ internal
+#else
+ public
+#endif
+ sealed class NonRandomizedStringEqualityComparer : EqualityComparer<string>, ISerializable
+ {
+ internal static new IEqualityComparer<string> Default { get; } = new NonRandomizedStringEqualityComparer();
+
+ private NonRandomizedStringEqualityComparer() { }
+
+ // This is used by the serialization engine.
+ private NonRandomizedStringEqualityComparer(SerializationInfo information, StreamingContext context) { }
+
+ public sealed override bool Equals(string x, string y) => string.Equals(x, y);
+
+ public sealed override int GetHashCode(string obj) => obj?.GetLegacyNonRandomizedHashCode() ?? 0;
+
+ public void GetObjectData(SerializationInfo info, StreamingContext context)
+ {
+ // We are doing this to stay compatible with .NET Framework.
+ info.SetType(typeof(GenericEqualityComparer<string>));
+ }
+ }
+}
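The comparer only pays off together with the switch-over it enables on Dictionary's insert path. An illustrative (not verbatim) sketch of that guard, assuming a collisionCount tallied while walking a bucket chain:

    // Illustrative sketch: once a chain exceeds the threshold, assume the keys
    // are adversarial, switch to randomized string hashing, and rehash.
    if (collisionCount >= HashHelpers.HashCollisionThreshold &&
        comparer is NonRandomizedStringEqualityComparer)
    {
        comparer = (IEqualityComparer<TKey>)EqualityComparer<string>.Default;
        Resize(entries.Length, forceNewHashCodes: true);
    }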
diff --git a/src/mscorlib/shared/System/Collections/HashHelpers.cs b/src/mscorlib/shared/System/Collections/HashHelpers.cs
new file mode 100644
index 0000000000..49cff85b58
--- /dev/null
+++ b/src/mscorlib/shared/System/Collections/HashHelpers.cs
@@ -0,0 +1,108 @@
+// Licensed to the .NET Foundation under one or more agreements.
+// The .NET Foundation licenses this file to you under the MIT license.
+// See the LICENSE file in the project root for more information.
+
+using System.Diagnostics;
+using System.Runtime.CompilerServices;
+using System.Runtime.Serialization;
+using System.Threading;
+
+namespace System.Collections
+{
+ internal static class HashHelpers
+ {
+ public const int HashCollisionThreshold = 100;
+
+ public const int HashPrime = 101;
+
+ // Table of prime numbers to use as hash table sizes.
+ // A typical resize algorithm would pick the smallest prime number in this array
+ // that is larger than twice the previous capacity.
+ // Suppose our Hashtable currently has capacity x and enough elements are added
+ // such that a resize needs to occur. Resizing first computes 2x then finds the
+ // first prime in the table greater than 2x, i.e. if primes are ordered
+        // p_1, p_2, ..., p_i, ..., it finds p_n such that p_{n-1} < 2x <= p_n.
+        // Doubling is important for preserving the asymptotic complexity of the
+        // hashtable operations such as add. Having a prime guarantees that double
+        // hashing does not lead to infinite loops, i.e., your hash function will be
+        // h1(key) + i*h2(key), 0 <= i < size, where h2 and the size are relatively prime.
+ public static readonly int[] primes = {
+ 3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919,
+ 1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591,
+ 17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437,
+ 187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263,
+ 1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369};
+
+ public static bool IsPrime(int candidate)
+ {
+ if ((candidate & 1) != 0)
+ {
+ int limit = (int)Math.Sqrt(candidate);
+ for (int divisor = 3; divisor <= limit; divisor += 2)
+ {
+ if ((candidate % divisor) == 0)
+ return false;
+ }
+ return true;
+ }
+ return (candidate == 2);
+ }
+
+ public static int GetPrime(int min)
+ {
+ if (min < 0)
+ throw new ArgumentException(SR.Arg_HTCapacityOverflow);
+
+ for (int i = 0; i < primes.Length; i++)
+ {
+ int prime = primes[i];
+ if (prime >= min) return prime;
+ }
+
+            // Outside of our predefined table.
+            // Compute the hard way.
+ for (int i = (min | 1); i < Int32.MaxValue; i += 2)
+ {
+ if (IsPrime(i) && ((i - 1) % HashPrime != 0))
+ return i;
+ }
+ return min;
+ }
+
+ // Returns size of hashtable to grow to.
+ public static int ExpandPrime(int oldSize)
+ {
+ int newSize = 2 * oldSize;
+
+            // Allow the hashtables to grow to maximum possible size (~2G elements) before encountering capacity overflow.
+ // Note that this check works even when _items.Length overflowed thanks to the (uint) cast
+ if ((uint)newSize > MaxPrimeArrayLength && MaxPrimeArrayLength > oldSize)
+ {
+ Debug.Assert(MaxPrimeArrayLength == GetPrime(MaxPrimeArrayLength), "Invalid MaxPrimeArrayLength");
+ return MaxPrimeArrayLength;
+ }
+
+ return GetPrime(newSize);
+ }
+
+
+ // This is the maximum prime smaller than Array.MaxArrayLength
+ public const int MaxPrimeArrayLength = 0x7FEFFFFD;
+
+
+        // Used by Hashtable and Dictionary's SerializationInfo .ctors to store the SerializationInfo
+ // object until OnDeserialization is called.
+ private static ConditionalWeakTable<object, SerializationInfo> s_serializationInfoTable;
+
+ internal static ConditionalWeakTable<object, SerializationInfo> SerializationInfoTable
+ {
+ get
+ {
+ if (s_serializationInfoTable == null)
+ Interlocked.CompareExchange(ref s_serializationInfoTable, new ConditionalWeakTable<object, SerializationInfo>(), null);
+
+ return s_serializationInfoTable;
+ }
+ }
+ }
+}
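As a usage sketch, capacities grow by doubling and then rounding up to the next prime in the table, clamped at MaxPrimeArrayLength. The `(i - 1) % HashPrime != 0` filter for primes computed outside the table presumably avoids sizes where HashPrime divides size - 1, which would collapse the double-hash increment `1 + (seed * HashPrime) % (size - 1)` onto a small set of values.

    // Hedged usage sketch: how a capacity evolves under ExpandPrime.
    int size = HashHelpers.GetPrime(3);   // 3
    size = HashHelpers.ExpandPrime(size); // 7  = first table prime >= 2 * 3
    size = HashHelpers.ExpandPrime(size); // 17 = first table prime >= 14
    size = HashHelpers.ExpandPrime(size); // 37 = first table prime >= 34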
diff --git a/src/mscorlib/src/System/Collections/Generic/EqualityComparer.cs b/src/mscorlib/src/System/Collections/Generic/EqualityComparer.cs
index 05297b089b..57b63eb0e1 100644
--- a/src/mscorlib/src/System/Collections/Generic/EqualityComparer.cs
+++ b/src/mscorlib/src/System/Collections/Generic/EqualityComparer.cs
@@ -269,56 +269,6 @@ namespace System.Collections.Generic
GetType().GetHashCode();
}
- // We use NonRandomizedStringEqualityComparer as default comparer as it doesnt use the randomized string hashing which
- // keeps the performance unaffected till we hit collision threshold and then we switch to the comparer which is using
- // randomized string hashing GenericEqualityComparer<string>
- // We are keeping serialization support here to support deserialization of .NET Core 2.0 serialization payloads with this type in it.
- [Serializable]
- internal sealed class NonRandomizedStringEqualityComparer : EqualityComparer<string>, ISerializable
- {
- private static IEqualityComparer<string> s_nonRandomizedComparer;
-
- private NonRandomizedStringEqualityComparer() { }
-
- // This is used by the serialization engine.
- private NonRandomizedStringEqualityComparer(SerializationInfo information, StreamingContext context) { }
-
- internal static new IEqualityComparer<string> Default
- {
- get
- {
- if (s_nonRandomizedComparer == null)
- {
- s_nonRandomizedComparer = new NonRandomizedStringEqualityComparer();
- }
- return s_nonRandomizedComparer;
- }
- }
-
- public override bool Equals(string x, string y)
- {
- if (x != null)
- {
- if (y != null) return x.Equals(y);
- return false;
- }
- if (y != null) return false;
- return true;
- }
-
- public override int GetHashCode(string obj)
- {
- if (obj == null) return 0;
- return obj.GetLegacyNonRandomizedHashCode();
- }
-
- public void GetObjectData(SerializationInfo info, StreamingContext context)
- {
- // We are doing this to stay compatible with .NET Framework.
- info.SetType(typeof(GenericEqualityComparer<string>));
- }
- }
-
// Performance of IndexOf on byte array is very important for some scenarios.
// We will call the C runtime function memchr, which is optimized.
[Serializable]
diff --git a/src/mscorlib/src/System/Collections/Hashtable.cs b/src/mscorlib/src/System/Collections/Hashtable.cs
index b890bfb3ca..6a23fde786 100644
--- a/src/mscorlib/src/System/Collections/Hashtable.cs
+++ b/src/mscorlib/src/System/Collections/Hashtable.cs
@@ -123,7 +123,6 @@ namespace System.Collections
--
*/
- internal const Int32 HashPrime = 101;
private const Int32 InitialSize = 3;
private const String LoadFactorName = "LoadFactor";
private const String VersionName = "Version";
@@ -268,7 +267,7 @@ namespace System.Collections
// visit every bucket in the table exactly once within hashsize
// iterations. Violate this and it'll cause obscure bugs forever.
// If you change this calculation for h2(key), update putEntry too!
- incr = (uint)(1 + ((seed * HashPrime) % ((uint)hashsize - 1)));
+ incr = (uint)(1 + ((seed * HashHelpers.HashPrime) % ((uint)hashsize - 1)));
return hashcode;
}
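(The incr computed here is h2 of a classic double-hashing probe: because the bucket count is prime and 1 <= incr <= hashsize - 1, incr is relatively prime to hashsize, so stepping by incr visits every bucket exactly once before repeating. An illustrative probe loop, not the verbatim Hashtable code:

    // Double-hashing sketch: h1 picks the start bucket, incr (h2) the stride.
    // Assumes: int hashcode >= 0, int hashsize = buckets.Length (a prime).
    uint seed = (uint)hashcode;
    uint incr = (uint)(1 + ((seed * HashHelpers.HashPrime) % ((uint)hashsize - 1)));
    int bucketNumber = (int)(seed % (uint)hashsize);
    int ntry = 0;
    do
    {
        // ... examine buckets[bucketNumber]; stop on a match or an empty slot ...
        bucketNumber = (int)(((long)bucketNumber + incr) % (uint)hashsize);
    } while (++ntry < hashsize);
)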
@@ -806,7 +805,7 @@ namespace System.Collections
Debug.Assert(hashcode >= 0, "hashcode >= 0"); // make sure collision bit (sign bit) wasn't set.
uint seed = (uint)hashcode;
- uint incr = (uint)(1 + ((seed * HashPrime) % ((uint)newBuckets.Length - 1)));
+ uint incr = (uint)(1 + ((seed * HashHelpers.HashPrime) % ((uint)newBuckets.Length - 1)));
int bucketNumber = (int)(seed % (uint)newBuckets.Length);
do
{
@@ -1378,102 +1377,4 @@ namespace System.Collections
private Hashtable hashtable;
}
}
-
- [FriendAccessAllowed]
- internal static class HashHelpers
- {
- public const int HashCollisionThreshold = 100;
-
- // Table of prime numbers to use as hash table sizes.
- // A typical resize algorithm would pick the smallest prime number in this array
- // that is larger than twice the previous capacity.
- // Suppose our Hashtable currently has capacity x and enough elements are added
- // such that a resize needs to occur. Resizing first computes 2x then finds the
- // first prime in the table greater than 2x, i.e. if primes are ordered
- // p_1, p_2, ..., p_i, ..., it finds p_n such that p_n-1 < 2x < p_n.
- // Doubling is important for preserving the asymptotic complexity of the
- // hashtable operations such as add. Having a prime guarantees that double
- // hashing does not lead to infinite loops. IE, your hash function will be
- // h1(key) + i*h2(key), 0 <= i < size. h2 and the size must be relatively prime.
- public static readonly int[] primes = {
- 3, 7, 11, 17, 23, 29, 37, 47, 59, 71, 89, 107, 131, 163, 197, 239, 293, 353, 431, 521, 631, 761, 919,
- 1103, 1327, 1597, 1931, 2333, 2801, 3371, 4049, 4861, 5839, 7013, 8419, 10103, 12143, 14591,
- 17519, 21023, 25229, 30293, 36353, 43627, 52361, 62851, 75431, 90523, 108631, 130363, 156437,
- 187751, 225307, 270371, 324449, 389357, 467237, 560689, 672827, 807403, 968897, 1162687, 1395263,
- 1674319, 2009191, 2411033, 2893249, 3471899, 4166287, 4999559, 5999471, 7199369};
-
- // Used by Hashtable and Dictionary's SeralizationInfo .ctor's to store the SeralizationInfo
- // object until OnDeserialization is called.
- private static ConditionalWeakTable<object, SerializationInfo> s_SerializationInfoTable;
-
- internal static ConditionalWeakTable<object, SerializationInfo> SerializationInfoTable
- {
- get
- {
- if (s_SerializationInfoTable == null)
- {
- ConditionalWeakTable<object, SerializationInfo> newTable = new ConditionalWeakTable<object, SerializationInfo>();
- Interlocked.CompareExchange(ref s_SerializationInfoTable, newTable, null);
- }
-
- return s_SerializationInfoTable;
- }
- }
-
- public static bool IsPrime(int candidate)
- {
- if ((candidate & 1) != 0)
- {
- int limit = (int)Math.Sqrt(candidate);
- for (int divisor = 3; divisor <= limit; divisor += 2)
- {
- if ((candidate % divisor) == 0)
- return false;
- }
- return true;
- }
- return (candidate == 2);
- }
-
- public static int GetPrime(int min)
- {
- if (min < 0)
- throw new ArgumentException(SR.Arg_HTCapacityOverflow);
-
- for (int i = 0; i < primes.Length; i++)
- {
- int prime = primes[i];
- if (prime >= min) return prime;
- }
-
- //outside of our predefined table.
- //compute the hard way.
- for (int i = (min | 1); i < Int32.MaxValue; i += 2)
- {
- if (IsPrime(i) && ((i - 1) % Hashtable.HashPrime != 0))
- return i;
- }
- return min;
- }
-
- // Returns size of hashtable to grow to.
- public static int ExpandPrime(int oldSize)
- {
- int newSize = 2 * oldSize;
-
- // Allow the hashtables to grow to maximum possible size (~2G elements) before encoutering capacity overflow.
- // Note that this check works even when _items.Length overflowed thanks to the (uint) cast
- if ((uint)newSize > MaxPrimeArrayLength && MaxPrimeArrayLength > oldSize)
- {
- Debug.Assert(MaxPrimeArrayLength == GetPrime(MaxPrimeArrayLength), "Invalid MaxPrimeArrayLength");
- return MaxPrimeArrayLength;
- }
-
- return GetPrime(newSize);
- }
-
-
- // This is the maximum prime smaller than Array.MaxArrayLength
- public const int MaxPrimeArrayLength = 0x7FEFFFFD;
- }
}
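The moved SerializationInfoTable also condenses the lazy initialization: the temporary local is gone, but the thread-safety story is unchanged. A self-contained sketch of the pattern (holder type name hypothetical):

    using System.Runtime.CompilerServices;
    using System.Runtime.Serialization;
    using System.Threading;

    internal static class LazyTableSketch // hypothetical holder type, for illustration
    {
        private static ConditionalWeakTable<object, SerializationInfo> s_table;

        internal static ConditionalWeakTable<object, SerializationInfo> Table
        {
            get
            {
                if (s_table == null)
                {
                    // Racing threads may each allocate a table, but CompareExchange
                    // installs only the first; the losers' allocations are simply
                    // collected, so every reader observes one shared instance.
                    Interlocked.CompareExchange(ref s_table,
                        new ConditionalWeakTable<object, SerializationInfo>(), null);
                }
                return s_table;
            }
        }
    }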
diff --git a/src/publish.proj b/src/publish.proj
index 81ac12df7c..2c56430382 100644
--- a/src/publish.proj
+++ b/src/publish.proj
@@ -19,8 +19,7 @@
<Target Name="PublishPackages" Condition="'$(__PublishPackages)' == 'true' and ('$(OfficialPublish)' != 'true' or '$(__BuildType)' == 'Release')">
<PropertyGroup>
- <RelativePath Condition="'$(RelativePath)' == ''">$(__BuildType)/pkg</RelativePath>
- <ExpectedFeedUrl>https://$(AccountName).blob.core.windows.net/$(ContainerName)/$(RelativePath)/index.json</ExpectedFeedUrl>
+ <ExpectedFeedUrl>https://$(AccountName).blob.core.windows.net/$(ContainerName)/$(RelativePath)index.json</ExpectedFeedUrl>
</PropertyGroup>
<ItemGroup>
<ItemsToPush Remove="*.nupkg" />
@@ -36,8 +35,7 @@
<Target Name="PublishSymbolPackages" Condition="'$(__PublishSymbols)' == 'true' and ('$(OfficialPublish)' != 'true' or '$(__BuildType)' == 'Release')">
<PropertyGroup>
- <RelativePath Condition="'$(RelativePath)' == ''">$(__BuildType)/symbolpkg</RelativePath>
- <ExpectedFeedUrl>https://$(AccountName).blob.core.windows.net/$(ContainerName)/$(RelativePath)/index.json</ExpectedFeedUrl>
+ <ExpectedFeedUrl>https://$(AccountName).blob.core.windows.net/$(ContainerName)/$(RelativePath)index.json</ExpectedFeedUrl>
</PropertyGroup>
<ItemGroup>
<ItemsToPush Remove="*.nupkg" />
@@ -68,4 +66,4 @@
<Target Name="Build" DependsOnTargets="PublishPackages;PublishSymbolPackages;PublishTestNativeBinaries"/>
-</Project> \ No newline at end of file
+</Project>
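Both targets now build the feed URL as https://$(AccountName).blob.core.windows.net/$(ContainerName)/$(RelativePath)index.json with no '/' inserted before index.json, and the local RelativePath defaults are gone; presumably the caller now supplies RelativePath with its trailing slash included (e.g. a value such as Release/pkg/ would yield .../Release/pkg/index.json).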
diff --git a/src/vm/siginfo.cpp b/src/vm/siginfo.cpp
index 40a55cb6f0..be919c8b7e 100644
--- a/src/vm/siginfo.cpp
+++ b/src/vm/siginfo.cpp
@@ -4969,7 +4969,7 @@ void ReportByRefPointersFromByRefLikeObject(promote_func *fn, ScanContext *sc, P
}
// TODO: GetApproxFieldTypeHandleThrowing may throw. This is a potential stress problem for fragile NGen of non-CoreLib
- // assemblies. It won’t ever throw for CoreCLR with R2R. Figure out if anything needs to be done to deal with the
+ // assemblies. It won't ever throw for CoreCLR with R2R. Figure out if anything needs to be done to deal with the
// exception.
PTR_MethodTable pFieldMT = pFD->GetApproxFieldTypeHandleThrowing().AsMethodTable();
if (!pFieldMT->IsByRefLike())
diff --git a/src/vm/synch.cpp b/src/vm/synch.cpp
index c21e4f53a0..1dd038237a 100644
--- a/src/vm/synch.cpp
+++ b/src/vm/synch.cpp
@@ -973,7 +973,6 @@ void CLRLifoSemaphore::Release(INT32 releaseCount)
if (countsBeforeUpdate == counts)
{
_ASSERTE((UINT32)releaseCount <= m_maximumSignalCount - counts.signalCount);
- _ASSERTE(newCounts.countOfWaitersSignaledToWake <= newCounts.waiterCount);
if (countOfWaitersToWake <= 0)
{
return;