diff options
Diffstat (limited to 'src/vm/jithelpers.cpp')
-rw-r--r-- | src/vm/jithelpers.cpp | 32 |
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp index 1626810758..7b9389d5b6 100644 --- a/src/vm/jithelpers.cpp +++ b/src/vm/jithelpers.cpp @@ -23,7 +23,7 @@ #include "security.h" #include "securitymeta.h" #include "dllimport.h" -#include "gc.h" +#include "gcheaputilities.h" #include "comdelegate.h" #include "jitperf.h" // to track jit perf #include "corprof.h" @@ -130,7 +130,7 @@ inline UINT64 ShiftToHi32Bits(UINT32 x) return ret.QuadPart; } -#if !defined(_TARGET_X86_) +#if !defined(_TARGET_X86_) || defined(FEATURE_PAL) /*********************************************************************/ HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2) { @@ -145,7 +145,7 @@ HCIMPL2_VV(INT64, JIT_LMul, INT64 val1, INT64 val2) return (val1 * val2); } HCIMPLEND -#endif // !defined(_TARGET_X86_) +#endif // !_TARGET_X86_ || FEATURE_PAL /*********************************************************************/ HCIMPL2_VV(INT64, JIT_LMulOvf, INT64 val1, INT64 val2) @@ -513,7 +513,7 @@ HCIMPL1_V(double, JIT_ULng2Dbl, UINT64 val) HCIMPLEND /*********************************************************************/ -// needed for ARM +// needed for ARM and RyuJIT-x86 HCIMPL1_V(double, JIT_Lng2Dbl, INT64 val) { FCALL_CONTRACT; @@ -619,7 +619,7 @@ HCIMPL1_V(UINT64, JIT_Dbl2ULng, double val) else { // subtract 0x8000000000000000, do the convert then add it back again ret = FastDbl2Lng(val - two63) + I64(0x8000000000000000); -} + } return ret; } HCIMPLEND @@ -654,7 +654,7 @@ HCIMPL1_V(UINT64, JIT_Dbl2ULngOvf, double val) HCIMPLEND -#if !defined(_TARGET_X86_) +#if !defined(_TARGET_X86_) || defined(FEATURE_PAL) HCIMPL1_V(INT64, JIT_Dbl2Lng, double val) { @@ -755,7 +755,7 @@ HCIMPL2_VV(double, JIT_DblRem, double dividend, double divisor) } HCIMPLEND -#endif // !defined(_TARGET_X86_) +#endif // !_TARGET_X86_ || FEATURE_PAL #include <optdefault.h> @@ -2858,7 +2858,7 @@ HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_) do { - 
_ASSERTE(GCHeap::UseAllocationContexts()); + _ASSERTE(GCHeapUtilities::UseAllocationContexts()); // This is typically the only call in the fast path. Making the call early seems to be better, as it allows the compiler // to use volatile registers for intermediate values. This reduces the number of push/pop instructions and eliminates @@ -2872,7 +2872,7 @@ HCIMPL1(Object*, JIT_NewS_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_) SIZE_T size = methodTable->GetBaseSize(); _ASSERTE(size % DATA_ALIGNMENT == 0); - alloc_context *allocContext = thread->GetAllocContext(); + gc_alloc_context *allocContext = thread->GetAllocContext(); BYTE *allocPtr = allocContext->alloc_ptr; _ASSERTE(allocPtr <= allocContext->alloc_limit); if (size > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr)) @@ -2997,7 +2997,7 @@ HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength) do { - _ASSERTE(GCHeap::UseAllocationContexts()); + _ASSERTE(GCHeapUtilities::UseAllocationContexts()); // Instead of doing elaborate overflow checks, we just limit the number of elements. This will avoid all overflow // problems, as well as making sure big string objects are correctly allocated in the big object heap. @@ -3021,7 +3021,7 @@ HCIMPL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength) _ASSERTE(alignedTotalSize >= totalSize); totalSize = alignedTotalSize; - alloc_context *allocContext = thread->GetAllocContext(); + gc_alloc_context *allocContext = thread->GetAllocContext(); BYTE *allocPtr = allocContext->alloc_ptr; _ASSERTE(allocPtr <= allocContext->alloc_limit); if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr)) @@ -3161,7 +3161,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn do { - _ASSERTE(GCHeap::UseAllocationContexts()); + _ASSERTE(GCHeapUtilities::UseAllocationContexts()); // Do a conservative check here. This is to avoid overflow while doing the calculations. 
We don't // have to worry about "large" objects, since the allocation quantum is never big enough for @@ -3198,7 +3198,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn _ASSERTE(alignedTotalSize >= totalSize); totalSize = alignedTotalSize; - alloc_context *allocContext = thread->GetAllocContext(); + gc_alloc_context *allocContext = thread->GetAllocContext(); BYTE *allocPtr = allocContext->alloc_ptr; _ASSERTE(allocPtr <= allocContext->alloc_limit); if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr)) @@ -3238,7 +3238,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH do { - _ASSERTE(GCHeap::UseAllocationContexts()); + _ASSERTE(GCHeapUtilities::UseAllocationContexts()); // Make sure that the total size cannot reach LARGE_OBJECT_SIZE, which also allows us to avoid overflow checks. The // "256" slack is to cover the array header size and round-up, using a constant value here out of laziness. @@ -3266,7 +3266,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH _ASSERTE(ALIGN_UP(totalSize, DATA_ALIGNMENT) == totalSize); - alloc_context *allocContext = thread->GetAllocContext(); + gc_alloc_context *allocContext = thread->GetAllocContext(); BYTE *allocPtr = allocContext->alloc_ptr; _ASSERTE(allocPtr <= allocContext->alloc_limit); if (totalSize > static_cast<SIZE_T>(allocContext->alloc_limit - allocPtr)) @@ -6431,7 +6431,7 @@ HCIMPL0(VOID, JIT_StressGC) bool fSkipGC = false; if (!fSkipGC) - GCHeap::GetGCHeap()->GarbageCollect(); + GCHeapUtilities::GetGCHeap()->GarbageCollect(); // <TODO>@TODO: the following ifdef is in error, but if corrected the // compiler complains about the *__ms->pRetAddr() saying machine state |