author     Steve MacLean <sdmaclea.qdt@qualcommdatacenter.com>  2018-03-27 10:20:19 -0400
committer  Jan Kotas <jkotas@microsoft.com>                     2018-03-27 07:20:19 -0700
commit     4fca952dbf71f4aad878033d6a74a398ad676e78 (patch)
tree       ec1268cd7a3c4dc9f77f60be90f476b3b4f1e47a /src
parent     c0fbb148fbc435122f1d439658aa1b9a7ee3f333 (diff)
[Arm64] reserve for jump stubs (#17244)
Diffstat (limited to 'src')
-rw-r--r--  src/vm/codeman.cpp       2
-rw-r--r--  src/vm/jitinterface.cpp  62
-rw-r--r--  src/vm/jitinterface.h    22
3 files changed, 55 insertions, 31 deletions
diff --git a/src/vm/codeman.cpp b/src/vm/codeman.cpp
index d69329f44f..aa3c1fa1ed 100644
--- a/src/vm/codeman.cpp
+++ b/src/vm/codeman.cpp
@@ -2063,7 +2063,7 @@ static size_t GetDefaultReserveForJumpStubs(size_t codeHeapSize)
{
LIMITED_METHOD_CONTRACT;
-#ifdef _TARGET_AMD64_
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
//
// Keep a small default reserve at the end of the codeheap for jump stubs. It should reduce the
// chance that we won't be able to allocate a jump stub because of a lack of suitable address space.
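
The codeman.cpp change above extends the small end-of-heap reserve for jump stubs from AMD64 to ARM64. As a rough sketch of the idea (not the actual CoreCLR policy; the constants and the standalone function name below are illustrative), such a default reserve can be derived from the code heap size with a small floor:

#include <algorithm>
#include <cstddef>

// Hypothetical sketch of an end-of-codeheap reserve policy for jump stubs.
// The divisor and the 4 KB floor are illustrative values, not what CoreCLR
// uses; on targets other than AMD64/ARM64 the real function returns 0.
static size_t DefaultReserveForJumpStubsSketch(size_t codeHeapSize)
{
    // Keep a small slice of the heap free so that a call emitted later whose
    // target is out of direct branch range can still get a nearby stub
    // without forcing allocation of a whole new code heap.
    return std::max<size_t>(codeHeapSize / 64, 0x1000);
}
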
diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
index f850153646..7c90fe0d51 100644
--- a/src/vm/jitinterface.cpp
+++ b/src/vm/jitinterface.cpp
@@ -11371,7 +11371,7 @@ void CEEJitInfo::recordRelocation(void * location,
// When m_fAllowRel32 == TRUE, the JIT will use REL32s for both data addresses and direct code targets.
// Since we cannot tell what the relocation is for, we have to defensively retry.
//
- m_fRel32Overflow = TRUE;
+ m_fJumpStubOverflow = TRUE;
delta = 0;
}
else
@@ -11385,7 +11385,7 @@ void CEEJitInfo::recordRelocation(void * location,
{
// This forces the JIT to retry the method, which allows us to reserve more space for jump stubs and have a higher chance that
// we will find space for them.
- m_fRel32Overflow = TRUE;
+ m_fJumpStubOverflow = TRUE;
}
// Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory
@@ -11429,12 +11429,12 @@ void CEEJitInfo::recordRelocation(void * location,
if (!FitsInRel28(delta))
{
// Use jump stub.
- //
+ //
TADDR baseAddr = (TADDR)fixupLocation;
TADDR loAddr = baseAddr - 0x08000000; // -2^27
TADDR hiAddr = baseAddr + 0x07FFFFFF; // +2^27-1
- // Check for the wrap around cases
+ // Check for the wrap around cases
if (loAddr > baseAddr)
loAddr = UINT64_MIN; // overflow
if (hiAddr < baseAddr)
@@ -11443,7 +11443,21 @@ void CEEJitInfo::recordRelocation(void * location,
PCODE jumpStubAddr = ExecutionManager::jumpStub(m_pMethodBeingCompiled,
(PCODE) target,
(BYTE *) loAddr,
- (BYTE *) hiAddr);
+ (BYTE *) hiAddr,
+ NULL,
+ false);
+
+ // Keep track of conservative estimate of how much memory may be needed by jump stubs. We will use it to reserve extra memory
+ // on retry to increase chances that the retry succeeds.
+ m_reserveForJumpStubs = max(0x400, m_reserveForJumpStubs + 2*BACK_TO_BACK_JUMP_ALLOCATE_SIZE);
+
+ if (jumpStubAddr == 0)
+ {
+ // This forces the JIT to retry the method, which allows us to reserve more space for jump stubs and have a higher chance that
+ // we will find space for them.
+ m_fJumpStubOverflow = TRUE;
+ break;
+ }
delta = (INT64)(jumpStubAddr - fixupLocation);
@@ -12674,25 +12688,22 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
}
#endif //_DEBUG
-#ifdef _TARGET_AMD64_
- BOOL fForceRel32Overflow = FALSE;
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+ BOOL fForceJumpStubOverflow = FALSE;
#ifdef _DEBUG
// Always exercise the overflow codepath with force relocs
if (PEDecoder::GetForceRelocs())
- fForceRel32Overflow = TRUE;
+ fForceJumpStubOverflow = TRUE;
+#endif
+
+#if defined(_TARGET_AMD64_)
+ BOOL fAllowRel32 = (g_fAllowRel32 | fForceJumpStubOverflow);
#endif
- BOOL fAllowRel32 = g_fAllowRel32 | fForceRel32Overflow;
size_t reserveForJumpStubs = 0;
- // For determinism, never try to use the REL32 in compilation process
- if (IsCompilationProcess())
- {
- fForceRel32Overflow = FALSE;
- fAllowRel32 = FALSE;
- }
-#endif // _TARGET_AMD64_
+#endif // defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
for (;;)
{
@@ -12706,10 +12717,15 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
EEJitManager *jitMgr = NULL;
#endif
-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
- if (fForceRel32Overflow)
- jitInfo.SetRel32Overflow(fAllowRel32);
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE)
+#ifdef _TARGET_AMD64_
+ if (fForceJumpStubOverflow)
+ jitInfo.SetJumpStubOverflow(fAllowRel32);
jitInfo.SetAllowRel32(fAllowRel32);
+#else
+ if (fForceJumpStubOverflow)
+ jitInfo.SetJumpStubOverflow(fForceJumpStubOverflow);
+#endif
jitInfo.SetReserveForJumpStubs(reserveForJumpStubs);
#endif
@@ -12858,21 +12874,23 @@ PCODE UnsafeJitFunction(MethodDesc* ftn, COR_ILMETHOD_DECODER* ILHeader, CORJIT_
if (!nativeEntry)
COMPlusThrow(kInvalidProgramException);
-#if defined(_TARGET_AMD64_) && !defined(CROSSGEN_COMPILE)
- if (jitInfo.IsRel32Overflow())
+#if (defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)) && !defined(CROSSGEN_COMPILE)
+ if (jitInfo.IsJumpStubOverflow())
{
// Backout and try again with fAllowRel32 == FALSE.
jitInfo.BackoutJitData(jitMgr);
+#ifdef _TARGET_AMD64_
// Disallow rel32 relocs in future.
g_fAllowRel32 = FALSE;
fAllowRel32 = FALSE;
+#endif // _TARGET_AMD64_
reserveForJumpStubs = jitInfo.GetReserveForJumpStubs();
continue;
}
-#endif // _TARGET_AMD64_ && !CROSSGEN_COMPILE
+#endif // (_TARGET_AMD64_ || _TARGET_ARM64_) && !CROSSGEN_COMPILE
LOG((LF_JIT, LL_INFO10000,
"Jitted Entry at" FMT_ADDR "method %s::%s %s\n", DBG_ADDR(nativeEntry),
diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
index e04274cb69..1e4c847204 100644
--- a/src/vm/jitinterface.h
+++ b/src/vm/jitinterface.h
@@ -1351,23 +1351,25 @@ public:
LIMITED_METHOD_CONTRACT;
m_fAllowRel32 = fAllowRel32;
}
+#endif
- void SetRel32Overflow(BOOL fRel32Overflow)
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+ void SetJumpStubOverflow(BOOL fJumpStubOverflow)
{
LIMITED_METHOD_CONTRACT;
- m_fRel32Overflow = fRel32Overflow;
+ m_fJumpStubOverflow = fJumpStubOverflow;
}
- BOOL IsRel32Overflow()
+ BOOL IsJumpStubOverflow()
{
LIMITED_METHOD_CONTRACT;
- return m_fRel32Overflow;
+ return m_fJumpStubOverflow;
}
BOOL JitAgain()
{
LIMITED_METHOD_CONTRACT;
- return m_fRel32Overflow;
+ return m_fJumpStubOverflow;
}
size_t GetReserveForJumpStubs()
@@ -1411,7 +1413,9 @@ public:
#endif
#ifdef _TARGET_AMD64_
m_fAllowRel32(FALSE),
- m_fRel32Overflow(FALSE),
+#endif
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+ m_fJumpStubOverflow(FALSE),
m_reserveForJumpStubs(0),
#endif
m_GCinfo_len(0),
@@ -1495,8 +1499,10 @@ protected :
#ifdef _TARGET_AMD64_
BOOL m_fAllowRel32; // Use 32-bit PC relative address modes
- BOOL m_fRel32Overflow; // Overflow while trying to use encode 32-bit PC relative address.
- // The code will need to be regenerated with m_fRel32Allowed == FALSE.
+#endif
+#if defined(_TARGET_AMD64_) || defined(_TARGET_ARM64_)
+ BOOL m_fJumpStubOverflow; // Overflow while trying to allocate a jump stub slot within the PC-relative branch region.
+ // The code will need to be regenerated (with m_fAllowRel32 == FALSE for AMD64).
size_t m_reserveForJumpStubs; // Space to reserve for jump stubs when allocating code
#endif
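
Taken together, the changes let UnsafeJitFunction retry compilation when a jump stub cannot be allocated in range: back out the generated code, carry the (now larger) m_reserveForJumpStubs estimate into the next attempt, and compile again (on AMD64 additionally turning off rel32). A toy model of that retry loop follows; all types and the fake compile step are illustrative, not CoreCLR APIs:

#include <cstddef>
#include <cstdio>

// Toy stand-in for CEEJitInfo: tracks only the overflow flag and the
// conservative reserve estimate that the real class records per relocation.
struct ToyJitInfo
{
    bool   jumpStubOverflow = false;
    size_t reserveForJumpStubs = 0;
};

// Fake "compile" step: pretend the first attempt (no reserve) fails to place
// a jump stub, mirroring the max(0x400, ...) growth seen in the diff.
static void* ToyCompile(ToyJitInfo& info, size_t reserve)
{
    if (reserve == 0)
    {
        info.jumpStubOverflow = true;
        info.reserveForJumpStubs = 0x400;
        return nullptr;
    }
    info.jumpStubOverflow = false;
    return reinterpret_cast<void*>(0x1000); // fake native entry point
}

int main()
{
    size_t reserveForJumpStubs = 0;
    void* nativeEntry = nullptr;

    for (;;)
    {
        ToyJitInfo info;
        nativeEntry = ToyCompile(info, reserveForJumpStubs);
        if (info.jumpStubOverflow)
        {
            // Back out and retry with a larger reservation for jump stubs.
            reserveForJumpStubs = info.reserveForJumpStubs;
            continue;
        }
        break;
    }

    std::printf("compiled at %p with reserve = 0x%zx\n", nativeEntry, reserveForJumpStubs);
    return 0;
}
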