summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKonstantin Baladurin <k.baladurin@partner.samsung.com>2018-04-26 17:34:58 +0300
committerwoongsuk cho <ws77.cho@samsung.com>2018-04-27 01:08:53 +0000
commit731adf2e8aba9b7a45e7671d313c2628f4ec5414 (patch)
tree5887ab6bba5604ec898d765f08cd2c89edc5ba1d
parentbf88f670189b7baae58b538ae3b77052f8ec7e08 (diff)
downloadcoreclr-731adf2e8aba9b7a45e7671d313c2628f4ec5414.tar.gz
coreclr-731adf2e8aba9b7a45e7671d313c2628f4ec5414.tar.bz2
coreclr-731adf2e8aba9b7a45e7671d313c2628f4ec5414.zip
Add memory optimization patches from upstream
Change-Id: Ie8ea75fa60184b77135289c8fdc0f49d40b49d87 (cherry picked from commit 371df401e4d8c9639035d164710d1246e0cb8548)
-rw-r--r--packaging/0001-Extract-PEImage-CreateLayoutMapped-and-PEImage-Creat.patch190
-rw-r--r--packaging/0002-Direct-mapping-of-IL-assembly-images-that-don-t-cont.patch246
-rw-r--r--packaging/0003-Delete-default-copy-move-constructors-and-assignment.patch153
-rw-r--r--packaging/0004-Change-relocations-in-ngen-ed-code-with-PC-relative-.patch467
-rw-r--r--packaging/0005-Allow-RelativePointer-SetValue-usage-for-non-DAC-bui.patch344
-rw-r--r--packaging/0006-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch1017
-rw-r--r--packaging/0007-FIX-fix-No.1-missing-GetImplementedMDs.patch59
-rw-r--r--packaging/0008-Fix-issues-with-RelativePointer-instead-of-RelativeF.patch26
-rw-r--r--packaging/0009-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch595
-rw-r--r--packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch1759
-rw-r--r--packaging/0011-FIX-fix-No.2-incorrect-m_pBeginInvokeMethod.patch25
-rw-r--r--packaging/0012-Replace-array-type-handle-with-method-table-in-argum.patch1214
-rw-r--r--packaging/0013-Implement-JIT_NewArr1_R2R-as-R2R-wrapper-for-JIT_New.patch384
-rw-r--r--packaging/0014-Fix-JIT_NewArr1-8-byte-alignment-for-ELEMENT_TYPE_R8.patch26
-rw-r--r--packaging/0015-Partially-remove-relocations-from-Class-section-of-N.patch1105
-rw-r--r--packaging/0016-Fix-copying-of-FieldMarshaler-structures-in-EEClassL.patch42
-rw-r--r--packaging/0017-Fix-alignment-of-reads-in-MD5Transform.-12800.patch51
-rw-r--r--packaging/0018-Simplify-SHM-allocator-12815.patch2647
-rw-r--r--packaging/0019-Remove-relocations-from-SECTION_Readonly-for-fields-.patch903
-rw-r--r--packaging/0020-Add-FixupPlainOrRelativePointerField-for-MethodDesc-.patch49
-rw-r--r--packaging/0021-Additional-fixes-for-RelativePointer-FixupPointer-Re.patch332
-rw-r--r--packaging/0022-Remove-relocations-for-InterfaceInfo_t-m_pMethodTabl.patch138
-rw-r--r--packaging/0023-Remove-relocations-for-MethodTable-m_pWriteableData-.patch207
-rw-r--r--packaging/0024-Remove-relocations-for-MethodTable-m_pPerInstInfo-fo.patch541
-rw-r--r--packaging/0025-Remove-relocations-for-MethodTable-s-vtable-1st-leve.patch901
-rw-r--r--packaging/0026-Move-ITEM_DICTIONARY-and-ITEM_VTABLE_CHUNK-to-separa.patch64
-rw-r--r--packaging/0027-Update-GUID.patch33
-rw-r--r--packaging/0028-Review-fixes.patch357
-rw-r--r--packaging/0029-Allocate-FileMappingImmutableData-szFileName-and-CFi.patch1606
-rw-r--r--packaging/0030-Remove-relocations-for-MethodTable-m_pParentMethodTa.patch563
-rw-r--r--packaging/0031-Fix-build-break-with-older-VS-versions-16522.patch37
-rw-r--r--packaging/0032-Fix-handling-of-incorrect-assemblies-on-Unix-16747.patch196
-rw-r--r--packaging/coreclr.spec66
33 files changed, 16342 insertions, 1 deletions
diff --git a/packaging/0001-Extract-PEImage-CreateLayoutMapped-and-PEImage-Creat.patch b/packaging/0001-Extract-PEImage-CreateLayoutMapped-and-PEImage-Creat.patch
new file mode 100644
index 0000000000..d51706bbcc
--- /dev/null
+++ b/packaging/0001-Extract-PEImage-CreateLayoutMapped-and-PEImage-Creat.patch
@@ -0,0 +1,190 @@
+From 550c59a96baa6714a32dd649795eb4e5294df18b Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <r.ayrapetyan@samsung.com>
+Date: Fri, 7 Apr 2017 13:25:42 +0300
+Subject: [PATCH 01/32] Extract PEImage::CreateLayoutMapped and
+ PEImage::CreateLayoutFlat from PEImage::GetLayoutInternal.
+
+---
+ src/vm/peimage.cpp | 138 +++++++++++++++++++++++++++++++++--------------------
+ src/vm/peimage.h | 6 +++
+ 2 files changed, 92 insertions(+), 52 deletions(-)
+
+diff --git a/src/vm/peimage.cpp b/src/vm/peimage.cpp
+index 3367ef9..1462c94 100644
+--- a/src/vm/peimage.cpp
++++ b/src/vm/peimage.cpp
+@@ -911,68 +911,102 @@ PTR_PEImageLayout PEImage::GetLayoutInternal(DWORD imageLayoutMask,DWORD flags)
+
+ if (imageLayoutMask&PEImageLayout::LAYOUT_MAPPED)
+ {
+- PEImageLayout * pLoadLayout = NULL;
++ pRetVal = PEImage::CreateLayoutMapped();
++ }
++ else
++ {
++ pRetVal = PEImage::CreateLayoutFlat();
++ }
++ }
+
+- if (m_bIsTrustedNativeImage || IsFile())
+- {
+- // For CoreCLR, try to load all files via LoadLibrary first. If LoadLibrary did not work, retry using
+- // regular mapping - but not for native images.
+- pLoadLayout = PEImageLayout::Load(this, TRUE /* bNTSafeLoad */, m_bIsTrustedNativeImage /* bThrowOnError */);
+- }
++ if (pRetVal != NULL)
++ {
++ pRetVal->AddRef();
++ }
+
+- if (pLoadLayout != NULL)
+- {
+- SetLayout(IMAGE_MAPPED,pLoadLayout);
+- pLoadLayout->AddRef();
+- SetLayout(IMAGE_LOADED,pLoadLayout);
+- pRetVal=pLoadLayout;
+- }
+- else
+- if (IsFile())
+- {
+- PEImageLayoutHolder pLayout(PEImageLayout::Map(GetFileHandle(),this));
+-
+- bool fMarkAnyCpuImageAsLoaded = false;
+- // Avoid mapping another image if we can. We can only do this for IL-ONLY images
+- // since LoadLibrary is needed if we are to actually load code.
+- if (pLayout->HasCorHeader() && pLayout->IsILOnly())
+- {
+- // For CoreCLR, IL only images will always be mapped. We also dont bother doing the conversion of PE header on 64bit,
+- // as done below for the desktop case, as there is no appcompat burden for CoreCLR on 64bit to have that conversion done.
+- fMarkAnyCpuImageAsLoaded = true;
+- }
++ return pRetVal;
++}
+
+- pLayout.SuppressRelease();
++PTR_PEImageLayout PEImage::CreateLayoutMapped()
++{
++ CONTRACTL
++ {
++ THROWS;
++ GC_TRIGGERS;
++ MODE_ANY;
++ PRECONDITION(m_pLayoutLock->IsWriterLock());
++ }
++ CONTRACTL_END;
+
+- SetLayout(IMAGE_MAPPED,pLayout);
+- if (fMarkAnyCpuImageAsLoaded)
+- {
+- pLayout->AddRef();
+- SetLayout(IMAGE_LOADED, pLayout);
+- }
+- pRetVal=pLayout;
+- }
+- else
+- {
+- PEImageLayoutHolder flatPE(GetLayoutInternal(PEImageLayout::LAYOUT_FLAT,LAYOUT_CREATEIFNEEDED));
+- if (!flatPE->CheckFormat())
+- ThrowFormat(COR_E_BADIMAGEFORMAT);
+- pRetVal=PEImageLayout::LoadFromFlat(flatPE);
+- SetLayout(IMAGE_MAPPED,pRetVal);
+- }
++ PTR_PEImageLayout pRetVal;
++
++ PEImageLayout * pLoadLayout = NULL;
++
++ if (m_bIsTrustedNativeImage || IsFile())
++ {
++ // For CoreCLR, try to load all files via LoadLibrary first. If LoadLibrary did not work, retry using
++ // regular mapping - but not for native images.
++ pLoadLayout = PEImageLayout::Load(this, TRUE /* bNTSafeLoad */, m_bIsTrustedNativeImage /* bThrowOnError */);
++ }
++
++ if (pLoadLayout != NULL)
++ {
++ SetLayout(IMAGE_MAPPED,pLoadLayout);
++ pLoadLayout->AddRef();
++ SetLayout(IMAGE_LOADED,pLoadLayout);
++ pRetVal=pLoadLayout;
++ }
++ else if (IsFile())
++ {
++ PEImageLayoutHolder pLayout(PEImageLayout::Map(GetFileHandle(),this));
++
++ bool fMarkAnyCpuImageAsLoaded = false;
++ // Avoid mapping another image if we can. We can only do this for IL-ONLY images
++ // since LoadLibrary is needed if we are to actually load code
++ if (pLayout->HasCorHeader() && pLayout->IsILOnly())
++ {
++ // For CoreCLR, IL only images will always be mapped. We also dont bother doing the conversion of PE header on 64bit,
++ // as done below for the desktop case, as there is no appcompat burden for CoreCLR on 64bit to have that conversion done.
++ fMarkAnyCpuImageAsLoaded = true;
+ }
+- else
+- if (imageLayoutMask&PEImageLayout::LAYOUT_FLAT)
++
++ pLayout.SuppressRelease();
++
++ SetLayout(IMAGE_MAPPED,pLayout);
++ if (fMarkAnyCpuImageAsLoaded)
+ {
+- pRetVal=PEImageLayout::LoadFlat(GetFileHandle(),this);
+- m_pLayouts[IMAGE_FLAT]=pRetVal;
++ pLayout->AddRef();
++ SetLayout(IMAGE_LOADED, pLayout);
+ }
+-
++ pRetVal=pLayout;
+ }
+- if (pRetVal)
++ else
+ {
+- pRetVal->AddRef();
++ PEImageLayoutHolder flatPE(GetLayoutInternal(PEImageLayout::LAYOUT_FLAT,LAYOUT_CREATEIFNEEDED));
++ if (!flatPE->CheckFormat())
++ ThrowFormat(COR_E_BADIMAGEFORMAT);
++ pRetVal=PEImageLayout::LoadFromFlat(flatPE);
++ SetLayout(IMAGE_MAPPED,pRetVal);
+ }
++
++ return pRetVal;
++}
++
++PTR_PEImageLayout PEImage::CreateLayoutFlat()
++{
++ CONTRACTL
++ {
++ GC_TRIGGERS;
++ MODE_ANY;
++ PRECONDITION(m_pLayoutLock->IsWriterLock());
++ }
++ CONTRACTL_END;
++
++ PTR_PEImageLayout pRetVal;
++
++ pRetVal = PEImageLayout::LoadFlat(GetFileHandle(),this);
++ m_pLayouts[IMAGE_FLAT] = pRetVal;
++
+ return pRetVal;
+ }
+
+diff --git a/src/vm/peimage.h b/src/vm/peimage.h
+index 3245621..f61e185 100644
+--- a/src/vm/peimage.h
++++ b/src/vm/peimage.h
+@@ -257,6 +257,12 @@ private:
+ #ifndef DACCESS_COMPILE
+ // Get or create the layout corresponding to the mask, with an AddRef
+ PTR_PEImageLayout GetLayoutInternal(DWORD imageLayoutMask, DWORD flags);
++
++ // Create the mapped layout
++ PTR_PEImageLayout CreateLayoutMapped();
++
++ // Create the flat layout
++ PTR_PEImageLayout CreateLayoutFlat();
+ #endif
+ // Get an existing layout corresponding to the mask, no AddRef
+ PTR_PEImageLayout GetExistingLayoutInternal(DWORD imageLayoutMask);
+--
+2.7.4
+
diff --git a/packaging/0002-Direct-mapping-of-IL-assembly-images-that-don-t-cont.patch b/packaging/0002-Direct-mapping-of-IL-assembly-images-that-don-t-cont.patch
new file mode 100644
index 0000000000..2e122a5223
--- /dev/null
+++ b/packaging/0002-Direct-mapping-of-IL-assembly-images-that-don-t-cont.patch
@@ -0,0 +1,246 @@
+From 0fb2f3573cbe92216e7f56f2ac8b22e4427671ee Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <r.ayrapetyan@samsung.com>
+Date: Wed, 5 Apr 2017 21:08:32 +0300
+Subject: [PATCH 02/32] Direct mapping of IL-assembly images that don't contain
+ writeable sections.
+
+---
+ src/inc/pedecoder.h | 2 +
+ src/utilcode/pedecoder.cpp | 31 ++++++++++++++++
+ src/vm/pefile.cpp | 15 +++++++-
+ src/vm/peimage.cpp | 92 ++++++++++++++++++++++++++++++++++++++--------
+ src/vm/peimage.h | 2 +-
+ 5 files changed, 124 insertions(+), 18 deletions(-)
+
+diff --git a/src/inc/pedecoder.h b/src/inc/pedecoder.h
+index d5bae87..01375e6 100644
+--- a/src/inc/pedecoder.h
++++ b/src/inc/pedecoder.h
+@@ -187,6 +187,8 @@ class PEDecoder
+
+ DWORD GetImageIdentity() const;
+
++ BOOL HasWriteableSections() const;
++
+ // Directory entry access
+
+ BOOL HasDirectoryEntry(int entry) const;
+diff --git a/src/utilcode/pedecoder.cpp b/src/utilcode/pedecoder.cpp
+index 3b3c937..babe374 100644
+--- a/src/utilcode/pedecoder.cpp
++++ b/src/utilcode/pedecoder.cpp
+@@ -440,6 +440,37 @@ CHECK PEDecoder::CheckSection(COUNT_T previousAddressEnd, COUNT_T addressStart,
+ CHECK_OK;
+ }
+
++BOOL PEDecoder::HasWriteableSections() const
++{
++ CONTRACT_CHECK
++ {
++ INSTANCE_CHECK;
++ PRECONDITION(CheckFormat());
++ NOTHROW;
++ GC_NOTRIGGER;
++ SUPPORTS_DAC;
++ SO_TOLERANT;
++ }
++ CONTRACT_CHECK_END;
++
++ PTR_IMAGE_SECTION_HEADER pSection = FindFirstSection(FindNTHeaders());
++ _ASSERTE(pSection != NULL);
++
++ PTR_IMAGE_SECTION_HEADER pSectionEnd = pSection + VAL16(FindNTHeaders()->FileHeader.NumberOfSections);
++
++ while (pSection < pSectionEnd)
++ {
++ if ((pSection->Characteristics & VAL32(IMAGE_SCN_MEM_WRITE)) != 0)
++ {
++ return TRUE;
++ }
++
++ pSection++;
++ }
++
++ return FALSE;
++}
++
+ CHECK PEDecoder::CheckDirectoryEntry(int entry, int forbiddenFlags, IsNullOK ok) const
+ {
+ CONTRACT_CHECK
+diff --git a/src/vm/pefile.cpp b/src/vm/pefile.cpp
+index c7870e6..0c4f660 100644
+--- a/src/vm/pefile.cpp
++++ b/src/vm/pefile.cpp
+@@ -376,9 +376,22 @@ void PEFile::LoadLibrary(BOOL allowNativeSkip/*=TRUE*/) // if allowNativeSkip==F
+ #endif
+ {
+ if (GetILimage()->IsFile())
+- GetILimage()->LoadFromMapped();
++ {
++#ifdef PLATFORM_UNIX
++ if (GetILimage()->IsILOnly())
++ {
++ GetILimage()->Load();
++ }
++ else
++#endif // PLATFORM_UNIX
++ {
++ GetILimage()->LoadFromMapped();
++ }
++ }
+ else
++ {
+ GetILimage()->LoadNoFile();
++ }
+ }
+ }
+
+diff --git a/src/vm/peimage.cpp b/src/vm/peimage.cpp
+index 1462c94..bd5ad7f 100644
+--- a/src/vm/peimage.cpp
++++ b/src/vm/peimage.cpp
+@@ -909,13 +909,36 @@ PTR_PEImageLayout PEImage::GetLayoutInternal(DWORD imageLayoutMask,DWORD flags)
+ {
+ _ASSERTE(HasID());
+
+- if (imageLayoutMask&PEImageLayout::LAYOUT_MAPPED)
++ BOOL bIsMappedLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_MAPPED) != 0);
++ BOOL bIsFlatLayoutSuitable = ((imageLayoutMask & PEImageLayout::LAYOUT_FLAT) != 0);
++
++#if !defined(PLATFORM_UNIX)
++ if (bIsMappedLayoutSuitable)
+ {
+- pRetVal = PEImage::CreateLayoutMapped();
++ bIsFlatLayoutSuitable = FALSE;
+ }
+- else
++#endif // !PLATFORM_UNIX
++
++ _ASSERTE(bIsMappedLayoutSuitable || bIsFlatLayoutSuitable);
++
++ BOOL bIsMappedLayoutRequired = !bIsFlatLayoutSuitable;
++ BOOL bIsFlatLayoutRequired = !bIsMappedLayoutSuitable;
++
++ if (bIsFlatLayoutRequired
++ || (bIsFlatLayoutSuitable && !m_bIsTrustedNativeImage))
+ {
+- pRetVal = PEImage::CreateLayoutFlat();
++ _ASSERTE(bIsFlatLayoutSuitable);
++
++ BOOL bPermitWriteableSections = bIsFlatLayoutRequired;
++
++ pRetVal = PEImage::CreateLayoutFlat(bPermitWriteableSections);
++ }
++
++ if (pRetVal == NULL)
++ {
++ _ASSERTE(bIsMappedLayoutSuitable);
++
++ pRetVal = PEImage::CreateLayoutMapped();
+ }
+ }
+
+@@ -992,7 +1015,7 @@ PTR_PEImageLayout PEImage::CreateLayoutMapped()
+ return pRetVal;
+ }
+
+-PTR_PEImageLayout PEImage::CreateLayoutFlat()
++PTR_PEImageLayout PEImage::CreateLayoutFlat(BOOL bPermitWriteableSections)
+ {
+ CONTRACTL
+ {
+@@ -1002,12 +1025,22 @@ PTR_PEImageLayout PEImage::CreateLayoutFlat()
+ }
+ CONTRACTL_END;
+
+- PTR_PEImageLayout pRetVal;
++ _ASSERTE(m_pLayouts[IMAGE_FLAT] == NULL);
+
+- pRetVal = PEImageLayout::LoadFlat(GetFileHandle(),this);
+- m_pLayouts[IMAGE_FLAT] = pRetVal;
++ PTR_PEImageLayout pFlatLayout = PEImageLayout::LoadFlat(GetFileHandle(),this);
+
+- return pRetVal;
++ if (!bPermitWriteableSections && pFlatLayout->HasWriteableSections())
++ {
++ pFlatLayout->Release();
++
++ return NULL;
++ }
++ else
++ {
++ m_pLayouts[IMAGE_FLAT] = pFlatLayout;
++
++ return pFlatLayout;
++ }
+ }
+
+ /* static */
+@@ -1070,17 +1103,44 @@ void PEImage::Load()
+ }
+
+ SimpleWriteLockHolder lock(m_pLayoutLock);
+- if(!IsFile())
++
++ _ASSERTE(m_pLayouts[IMAGE_LOADED] == NULL);
++
++#ifdef PLATFORM_UNIX
++ if (m_pLayouts[IMAGE_FLAT] != NULL
++ && m_pLayouts[IMAGE_FLAT]->CheckFormat()
++ && m_pLayouts[IMAGE_FLAT]->IsILOnly()
++ && !m_pLayouts[IMAGE_FLAT]->HasWriteableSections())
+ {
+- if (!m_pLayouts[IMAGE_FLAT]->CheckILOnly())
+- ThrowHR(COR_E_BADIMAGEFORMAT);
+- if(m_pLayouts[IMAGE_LOADED]==NULL)
+- SetLayout(IMAGE_LOADED,PEImageLayout::LoadFromFlat(m_pLayouts[IMAGE_FLAT]));
++ // IL-only images with writeable sections are mapped in general way,
++ // because the writeable sections should always be page-aligned
++ // to make possible setting another protection bits exactly for these sections
++ _ASSERTE(!m_pLayouts[IMAGE_FLAT]->HasWriteableSections());
++
++ // As the image is IL-only, there should no be native code to execute
++ _ASSERTE(!m_pLayouts[IMAGE_FLAT]->HasNativeEntryPoint());
++
++ m_pLayouts[IMAGE_FLAT]->AddRef();
++
++ SetLayout(IMAGE_LOADED, m_pLayouts[IMAGE_FLAT]);
+ }
+ else
++#endif // PLATFORM_UNIX
+ {
+- if(m_pLayouts[IMAGE_LOADED]==NULL)
+- SetLayout(IMAGE_LOADED,PEImageLayout::Load(this,TRUE));
++ if(!IsFile())
++ {
++ _ASSERTE(m_pLayouts[IMAGE_FLAT] != NULL);
++
++ if (!m_pLayouts[IMAGE_FLAT]->CheckILOnly())
++ ThrowHR(COR_E_BADIMAGEFORMAT);
++ if(m_pLayouts[IMAGE_LOADED]==NULL)
++ SetLayout(IMAGE_LOADED,PEImageLayout::LoadFromFlat(m_pLayouts[IMAGE_FLAT]));
++ }
++ else
++ {
++ if(m_pLayouts[IMAGE_LOADED]==NULL)
++ SetLayout(IMAGE_LOADED,PEImageLayout::Load(this,TRUE));
++ }
+ }
+ }
+
+diff --git a/src/vm/peimage.h b/src/vm/peimage.h
+index f61e185..e77a474 100644
+--- a/src/vm/peimage.h
++++ b/src/vm/peimage.h
+@@ -262,7 +262,7 @@ private:
+ PTR_PEImageLayout CreateLayoutMapped();
+
+ // Create the flat layout
+- PTR_PEImageLayout CreateLayoutFlat();
++ PTR_PEImageLayout CreateLayoutFlat(BOOL bPermitWriteableSections);
+ #endif
+ // Get an existing layout corresponding to the mask, no AddRef
+ PTR_PEImageLayout GetExistingLayoutInternal(DWORD imageLayoutMask);
+--
+2.7.4
+
diff --git a/packaging/0003-Delete-default-copy-move-constructors-and-assignment.patch b/packaging/0003-Delete-default-copy-move-constructors-and-assignment.patch
new file mode 100644
index 0000000000..c8653c0906
--- /dev/null
+++ b/packaging/0003-Delete-default-copy-move-constructors-and-assignment.patch
@@ -0,0 +1,153 @@
+From 662cc59c451bb6e1b5481a088126b5c0293a9238 Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Mon, 22 May 2017 17:38:20 +0300
+Subject: [PATCH 03/32] Delete default copy/move constructors and assignment
+ operators of RelativePointer and RelativeFixupPointer. (#11745)
+
+---
+ src/inc/fixuppointer.h | 31 +++++++++++++++++++++++++++++++
+ src/vm/field.h | 29 +++++++++++++++++++++++++++--
+ src/vm/generics.cpp | 3 +--
+ src/vm/ngenhash.h | 7 +++++++
+ 4 files changed, 66 insertions(+), 4 deletions(-)
+
+diff --git a/src/inc/fixuppointer.h b/src/inc/fixuppointer.h
+index 5a1b62c..3467cfe 100644
+--- a/src/inc/fixuppointer.h
++++ b/src/inc/fixuppointer.h
+@@ -30,6 +30,24 @@ template<typename PTR_TYPE>
+ class RelativePointer
+ {
+ public:
++#ifndef DACCESS_COMPILE
++ RelativePointer()
++ {
++ m_delta = (TADDR)NULL;
++
++ _ASSERTE (IsNull());
++ }
++#else // DACCESS_COMPILE
++ RelativePointer() =delete;
++#endif // DACCESS_COMPILE
++
++ // Implicit copy/move is not allowed
++ // Bitwise copy is implemented by BitwiseCopyTo method
++ RelativePointer<PTR_TYPE>(const RelativePointer<PTR_TYPE> &) =delete;
++ RelativePointer<PTR_TYPE>(RelativePointer<PTR_TYPE> &&) =delete;
++ RelativePointer<PTR_TYPE>& operator = (const RelativePointer<PTR_TYPE> &) =delete;
++ RelativePointer<PTR_TYPE>& operator = (RelativePointer<PTR_TYPE> &&) =delete;
++
+ // Returns whether the encoded pointer is NULL.
+ BOOL IsNull() const
+ {
+@@ -143,6 +161,13 @@ public:
+ dac_cast<DPTR(RelativePointer<PTR_TYPE>)>(base)->SetValueMaybeNull(base, addr);
+ }
+
++#ifndef DACCESS_COMPILE
++ void BitwiseCopyTo(RelativePointer<PTR_TYPE> &dest) const
++ {
++ dest.m_delta = m_delta;
++ }
++#endif // DACCESS_COMPILE
++
+ private:
+ #ifndef DACCESS_COMPILE
+ Volatile<TADDR> m_delta;
+@@ -234,6 +259,12 @@ template<typename PTR_TYPE>
+ class RelativeFixupPointer
+ {
+ public:
++ // Implicit copy/move is not allowed
++ RelativeFixupPointer<PTR_TYPE>(const RelativeFixupPointer<PTR_TYPE> &) =delete;
++ RelativeFixupPointer<PTR_TYPE>(RelativeFixupPointer<PTR_TYPE> &&) =delete;
++ RelativeFixupPointer<PTR_TYPE>& operator = (const RelativeFixupPointer<PTR_TYPE> &) =delete;
++ RelativeFixupPointer<PTR_TYPE>& operator = (RelativeFixupPointer<PTR_TYPE> &&) =delete;
++
+ // Returns whether the encoded pointer is NULL.
+ BOOL IsNull() const
+ {
+diff --git a/src/vm/field.h b/src/vm/field.h
+index 030a0aa..8f6668b 100644
+--- a/src/vm/field.h
++++ b/src/vm/field.h
+@@ -43,6 +43,8 @@ class FieldDesc
+ protected:
+ RelativePointer<PTR_MethodTable> m_pMTOfEnclosingClass; // This is used to hold the log2 of the field size temporarily during class loading. Yuck.
+
++ // See also: FieldDesc::InitializeFrom method
++
+ #if defined(DACCESS_COMPILE)
+ union { //create a union so I can get the correct offset for ClrDump.
+ unsigned m_dword1;
+@@ -85,10 +87,33 @@ class FieldDesc
+ LPUTF8 m_debugName;
+ #endif
+
++public:
+ // Allocated by special heap means, don't construct me
+- FieldDesc() {};
++ FieldDesc() =delete;
++
++#ifndef DACCESS_COMPILE
++ void InitializeFrom(const FieldDesc& sourceField, MethodTable *pMT)
++ {
++ m_pMTOfEnclosingClass.SetValue(pMT);
++
++ m_mb = sourceField.m_mb;
++ m_isStatic = sourceField.m_isStatic;
++ m_isThreadLocal = sourceField.m_isThreadLocal;
++ m_isRVA = sourceField.m_isRVA;
++ m_prot = sourceField.m_prot;
++ m_requiresFullMbValue = sourceField.m_requiresFullMbValue;
++
++ m_dwOffset = sourceField.m_dwOffset;
++ m_type = sourceField.m_type;
++
++#ifdef _DEBUG
++ m_isDangerousAppDomainAgileField = sourceField.m_isDangerousAppDomainAgileField;
++
++ m_debugName = sourceField.m_debugName;
++#endif // _DEBUG
++ }
++#endif // !DACCESS_COMPILE
+
+-public:
+ #ifdef _DEBUG
+ inline LPUTF8 GetDebugName()
+ {
+diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
+index a04bde1..63d95a0 100644
+--- a/src/vm/generics.cpp
++++ b/src/vm/generics.cpp
+@@ -597,8 +597,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
+
+ for (DWORD i = 0; i < pOldMT->GetNumStaticFields(); i++)
+ {
+- pStaticFieldDescs[i] = pOldFD[i];
+- pStaticFieldDescs[i].SetMethodTable(pMT);
++ pStaticFieldDescs[i].InitializeFrom(pOldFD[i], pMT);
+ }
+ }
+ pMT->SetupGenericsStaticsInfo(pStaticFieldDescs);
+diff --git a/src/vm/ngenhash.h b/src/vm/ngenhash.h
+index 004d4b8..667a55e 100644
+--- a/src/vm/ngenhash.h
++++ b/src/vm/ngenhash.h
+@@ -475,6 +475,13 @@ public:
+ // Call this during the ngen Fixup phase to adjust the relative pointer to account for ngen image layout.
+ void Fixup(DataImage *pImage, NgenHashTable<NGEN_HASH_ARGS> *pTable);
+ #endif // FEATURE_PREJIT
++
++ NgenHashEntryRef<NGEN_HASH_ARGS>& operator = (const NgenHashEntryRef<NGEN_HASH_ARGS> &src)
++ {
++ src.m_rpEntryRef.BitwiseCopyTo(m_rpEntryRef);
++
++ return *this;
++ }
+ #endif // !DACCESS_COMPILE
+
+ private:
+--
+2.7.4
+
diff --git a/packaging/0004-Change-relocations-in-ngen-ed-code-with-PC-relative-.patch b/packaging/0004-Change-relocations-in-ngen-ed-code-with-PC-relative-.patch
new file mode 100644
index 0000000000..09d836ad68
--- /dev/null
+++ b/packaging/0004-Change-relocations-in-ngen-ed-code-with-PC-relative-.patch
@@ -0,0 +1,467 @@
+From 381ca0894a01e9bd83ab39d6163c947531051e17 Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <r.ayrapetyan@samsung.com>
+Date: Wed, 3 May 2017 17:01:07 +0300
+Subject: [PATCH 04/32] Change relocations in ngen-ed code with PC-relative
+ constants for Linux ARM32.
+
+---
+ src/inc/corinfo.h | 15 +++++++++++++++
+ src/inc/corjit.h | 6 ++++++
+ src/inc/zapper.h | 2 ++
+ src/jit/codegen.h | 4 ++++
+ src/jit/codegenarm.cpp | 12 ++++--------
+ src/jit/codegencommon.cpp | 47 +++++++++++++++++++++++++++++++++++++++++++++++
+ src/jit/codegenlegacy.cpp | 9 +++------
+ src/jit/emit.cpp | 23 +++++++++++++++++++++++
+ src/jit/emit.h | 5 +++++
+ src/jit/emitarm.cpp | 4 ++--
+ src/jit/instr.cpp | 3 +--
+ src/jit/jitee.h | 12 ++++++++++++
+ src/zap/zapinfo.cpp | 19 +++++++++++++++++++
+ src/zap/zapper.cpp | 18 ++++++++++++++----
+ src/zap/zaprelocs.cpp | 17 +++++++++++++++++
+ 15 files changed, 174 insertions(+), 22 deletions(-)
+ mode change 100755 => 100644 src/jit/codegen.h
+
+diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
+index 97f3958..2495de2 100644
+--- a/src/inc/corinfo.h
++++ b/src/inc/corinfo.h
+@@ -3074,4 +3074,19 @@ public:
+ #define IMAGE_REL_BASED_REL32 0x10
+ #define IMAGE_REL_BASED_THUMB_BRANCH24 0x13
+
++// The identifier for ARM32-specific PC-relative address
++// computation corresponds to the following instruction
++// sequence:
++// l0: movw rX, #imm_lo // 4 byte
++// l4: movt rX, #imm_hi // 4 byte
++// l8: add rX, pc <- after this instruction rX = relocTarget
++//
++// Program counter at l8 is address of l8 + 4
++// Address of relocated movw/movt is l0
++// So, imm should be calculated as the following:
++// imm = relocTarget - (l8 + 4) = relocTarget - (l0 + 8 + 4) = relocTarget - (l_0 + 12)
++// So, the value of offset correction is 12
++//
++#define IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL 0x14
++
+ #endif // _COR_INFO_H_
+diff --git a/src/inc/corjit.h b/src/inc/corjit.h
+index e6d067c..e6e8257 100644
+--- a/src/inc/corjit.h
++++ b/src/inc/corjit.h
+@@ -148,6 +148,12 @@ public:
+ CORJIT_FLAG_DESKTOP_QUIRKS = 38, // The JIT should generate desktop-quirk-compatible code
+ CORJIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible
+ CORJIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code
++
++#if defined(_TARGET_ARM_)
++ CORJIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
++#else // !defined(_TARGET_ARM_)
++ CORJIT_FLAG_UNUSED11 = 41
++#endif // !defined(_TARGET_ARM_)
+ };
+
+ CORJIT_FLAGS()
+diff --git a/src/inc/zapper.h b/src/inc/zapper.h
+index a55ddbe..b846274 100644
+--- a/src/inc/zapper.h
++++ b/src/inc/zapper.h
+@@ -448,6 +448,8 @@ class ZapperOptions
+
+ bool m_fNoMetaData; // Do not copy metadata and IL to native image
+
++ void SetCompilerFlags(void);
++
+ ZapperOptions();
+ ~ZapperOptions();
+ };
+diff --git a/src/jit/codegen.h b/src/jit/codegen.h
+old mode 100755
+new mode 100644
+index e50e640..471434c
+--- a/src/jit/codegen.h
++++ b/src/jit/codegen.h
+@@ -361,6 +361,10 @@ protected:
+ /* IN OUT */ bool* pUnwindStarted,
+ bool jmpEpilog);
+
++ void genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg);
++ void genMov32RelocatableDataLabel(unsigned value, regNumber reg);
++ void genMov32RelocatableImmediate(emitAttr size, unsigned value, regNumber reg);
++
+ bool genUsedPopToReturn; // True if we use the pop into PC to return,
+ // False if we didn't and must branch to LR to return.
+
+diff --git a/src/jit/codegenarm.cpp b/src/jit/codegenarm.cpp
+index 40371e3..8f98343 100644
+--- a/src/jit/codegenarm.cpp
++++ b/src/jit/codegenarm.cpp
+@@ -44,8 +44,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
+
+ // Load the address where the finally funclet should return into LR.
+ // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do the return.
+- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
+- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
++ genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);
+
+ // Jump to the finally BB
+ inst_JMP(EJ_jmp, block->bbJumpDest);
+@@ -63,8 +62,7 @@ BasicBlock* CodeGen::genCallFinally(BasicBlock* block)
+ // genEHCatchRet:
+ void CodeGen::genEHCatchRet(BasicBlock* block)
+ {
+- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_INTRET);
+- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_INTRET);
++ genMov32RelocatableDisplacement(block->bbJumpDest, REG_INTRET);
+ }
+
+ //------------------------------------------------------------------------
+@@ -82,8 +80,7 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm,
+
+ if (EA_IS_RELOC(size))
+ {
+- getEmitter()->emitIns_R_I(INS_movw, size, reg, imm);
+- getEmitter()->emitIns_R_I(INS_movt, size, reg, imm);
++ genMov32RelocatableImmediate(size, imm, reg);
+ }
+ else if (imm == 0)
+ {
+@@ -681,8 +678,7 @@ void CodeGen::genJumpTable(GenTree* treeNode)
+
+ getEmitter()->emitDataGenEnd();
+
+- getEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, jmpTabBase, treeNode->gtRegNum);
+- getEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, jmpTabBase, treeNode->gtRegNum);
++ genMov32RelocatableDataLabel(jmpTabBase, treeNode->gtRegNum);
+
+ genProduceReg(treeNode);
+ }
+diff --git a/src/jit/codegencommon.cpp b/src/jit/codegencommon.cpp
+index 50f43fa..9613e4d 100644
+--- a/src/jit/codegencommon.cpp
++++ b/src/jit/codegencommon.cpp
+@@ -6380,6 +6380,53 @@ void CodeGen::genFreeLclFrame(unsigned frameSize, /* IN OUT */ bool* pUnwindStar
+
+ /*-----------------------------------------------------------------------------
+ *
++ * Move of relocatable displacement value to register
++ */
++void CodeGen::genMov32RelocatableDisplacement(BasicBlock* block, regNumber reg)
++{
++ getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block, reg);
++ getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block, reg);
++
++ if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
++ {
++ getEmitter()->emitIns_R_R_R(INS_add, EA_4BYTE_DSP_RELOC, reg, reg, REG_PC);
++ }
++}
++
++/*-----------------------------------------------------------------------------
++ *
++ * Move of relocatable data-label to register
++ */
++void CodeGen::genMov32RelocatableDataLabel(unsigned value, regNumber reg)
++{
++ getEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, value, reg);
++ getEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, value, reg);
++
++ if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
++ {
++ getEmitter()->emitIns_R_R_R(INS_add, EA_HANDLE_CNS_RELOC, reg, reg, REG_PC);
++ }
++}
++
++/*-----------------------------------------------------------------------------
++ *
++ * Move of relocatable immediate to register
++ */
++void CodeGen::genMov32RelocatableImmediate(emitAttr size, unsigned value, regNumber reg)
++{
++ _ASSERTE(EA_IS_RELOC(size));
++
++ getEmitter()->emitIns_R_I(INS_movw, size, reg, value);
++ getEmitter()->emitIns_R_I(INS_movt, size, reg, value);
++
++ if (compiler->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
++ {
++ getEmitter()->emitIns_R_R_R(INS_add, size, reg, reg, REG_PC);
++ }
++}
++
++/*-----------------------------------------------------------------------------
++ *
+ * Returns register mask to push/pop to allocate a small stack frame,
+ * instead of using "sub sp" / "add sp". Returns RBM_NONE if either frame size
+ * is zero, or if we should use "sub sp" / "add sp" instead of push/pop.
+diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
+index b8a239a..178be54 100644
+--- a/src/jit/codegenlegacy.cpp
++++ b/src/jit/codegenlegacy.cpp
+@@ -13095,8 +13095,7 @@ void CodeGen::genCodeForBBlist()
+ // Load the address where the finally funclet should return into LR.
+ // The funclet prolog/epilog will do "push {lr}" / "pop {pc}" to do
+ // the return.
+- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
+- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, bbFinallyRet, REG_LR);
++ genMov32RelocatableDisplacement(bbFinallyRet, REG_LR);
+ regTracker.rsTrackRegTrash(REG_LR);
+ #endif // 0
+
+@@ -13123,8 +13122,7 @@ void CodeGen::genCodeForBBlist()
+
+ case BBJ_EHCATCHRET:
+ // set r0 to the address the VM should return to after the catch
+- getEmitter()->emitIns_R_L(INS_movw, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
+- getEmitter()->emitIns_R_L(INS_movt, EA_4BYTE_DSP_RELOC, block->bbJumpDest, REG_R0);
++ genMov32RelocatableDisplacement(block->bbJumpDest, REG_R0);
+ regTracker.rsTrackRegTrash(REG_R0);
+
+ __fallthrough;
+@@ -15509,8 +15507,7 @@ void CodeGen::genTableSwitch(regNumber reg, unsigned jumpCnt, BasicBlock** jumpT
+ // Pick any register except the index register.
+ //
+ regNumber regTabBase = regSet.rsGrabReg(RBM_ALLINT & ~genRegMask(reg));
+- getEmitter()->emitIns_R_D(INS_movw, EA_HANDLE_CNS_RELOC, jmpTabBase, regTabBase);
+- getEmitter()->emitIns_R_D(INS_movt, EA_HANDLE_CNS_RELOC, jmpTabBase, regTabBase);
++ genMov32RelocatableDataLabel(jmpTabBase, regTabBase);
+ regTracker.rsTrackRegTrash(regTabBase);
+
+ // LDR PC, [regTableBase + reg * 4] (encoded as LDR PC, [regTableBase, reg, LSL 2]
+diff --git a/src/jit/emit.cpp b/src/jit/emit.cpp
+index d2aa29f..29f79f0 100644
+--- a/src/jit/emit.cpp
++++ b/src/jit/emit.cpp
+@@ -7103,6 +7103,29 @@ void emitter::emitRecordRelocation(void* location, /* IN */
+ #endif // defined(LATE_DISASM)
+ }
+
++#ifdef _TARGET_ARM_
++/*****************************************************************************
++ * A helper for handling a Thumb-Mov32 of position-independent (PC-relative) value
++ *
++ * This routine either records relocation for the location with the EE,
++ * or creates a virtual relocation entry to perform offset fixup during
++ * compilation without recording it with EE - depending on which of
++ * absolute/relocative relocations mode are used for code section.
++ */
++void emitter::emitHandlePCRelativeMov32(void* location, /* IN */
++ void* target) /* IN */
++{
++ if (emitComp->opts.jitFlags->IsSet(JitFlags::JIT_FLAG_RELATIVE_CODE_RELOCS))
++ {
++ emitRecordRelocation(location, target, IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL);
++ }
++ else
++ {
++ emitRecordRelocation(location, target, IMAGE_REL_BASED_THUMB_MOV32);
++ }
++}
++#endif // _TARGET_ARM_
++
+ /*****************************************************************************
+ * A helper for recording a call site with the EE.
+ */
+diff --git a/src/jit/emit.h b/src/jit/emit.h
+index 5ec8a6a..a925f1f 100644
+--- a/src/jit/emit.h
++++ b/src/jit/emit.h
+@@ -2183,6 +2183,11 @@ public:
+ WORD slotNum = 0, /* IN */
+ INT32 addlDelta = 0); /* IN */
+
++#ifdef _TARGET_ARM_
++ void emitHandlePCRelativeMov32(void* location, /* IN */
++ void* target); /* IN */
++#endif
++
+ void emitRecordCallSite(ULONG instrOffset, /* IN */
+ CORINFO_SIG_INFO* callSig, /* IN */
+ CORINFO_METHOD_HANDLE methodHandle); /* IN */
+diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
+index 2b8eb25..9ec8e07 100644
+--- a/src/jit/emitarm.cpp
++++ b/src/jit/emitarm.cpp
+@@ -5387,7 +5387,7 @@ BYTE* emitter::emitOutputLJ(insGroup* ig, BYTE* dst, instrDesc* i)
+ {
+ assert(ins == INS_movt || ins == INS_movw);
+ if ((ins == INS_movt) && emitComp->info.compMatchedVM)
+- emitRecordRelocation((void*)(dst - 8), (void*)distVal, IMAGE_REL_BASED_THUMB_MOV32);
++ emitHandlePCRelativeMov32((void*)(dst - 8), (void*)distVal);
+ }
+ }
+ else
+@@ -6011,7 +6011,7 @@ size_t emitter::emitOutputInstr(insGroup* ig, instrDesc* id, BYTE** dp)
+ assert((ins == INS_movt) || (ins == INS_movw));
+ dst += emitOutput_Thumb2Instr(dst, code);
+ if ((ins == INS_movt) && emitComp->info.compMatchedVM)
+- emitRecordRelocation((void*)(dst - 8), (void*)imm, IMAGE_REL_BASED_THUMB_MOV32);
++ emitHandlePCRelativeMov32((void*)(dst - 8), (void*)imm);
+ }
+ else
+ {
+diff --git a/src/jit/instr.cpp b/src/jit/instr.cpp
+index 5bbfdde..670a709 100644
+--- a/src/jit/instr.cpp
++++ b/src/jit/instr.cpp
+@@ -3915,8 +3915,7 @@ void CodeGen::instGen_Set_Reg_To_Imm(emitAttr size, regNumber reg, ssize_t imm,
+
+ if (EA_IS_RELOC(size))
+ {
+- getEmitter()->emitIns_R_I(INS_movw, size, reg, imm);
+- getEmitter()->emitIns_R_I(INS_movt, size, reg, imm);
++ genMov32RelocatableImmediate(size, imm, reg);
+ }
+ else if (arm_Valid_Imm_For_Mov(imm))
+ {
+diff --git a/src/jit/jitee.h b/src/jit/jitee.h
+index 7b0e4a0..7a03dd6 100644
+--- a/src/jit/jitee.h
++++ b/src/jit/jitee.h
+@@ -80,6 +80,12 @@ public:
+ JIT_FLAG_DESKTOP_QUIRKS = 38, // The JIT should generate desktop-quirk-compatible code
+ JIT_FLAG_TIER0 = 39, // This is the initial tier for tiered compilation which should generate code as quickly as possible
+ JIT_FLAG_TIER1 = 40, // This is the final tier (for now) for tiered compilation which should generate high quality code
++
++#if defined(_TARGET_ARM_)
++ JIT_FLAG_RELATIVE_CODE_RELOCS = 41, // JIT should generate PC-relative address computations instead of EE relocation records
++#else // !defined(_TARGET_ARM_)
++ JIT_FLAG_UNUSED11 = 41
++#endif // !defined(_TARGET_ARM_)
+ };
+ // clang-format on
+
+@@ -192,6 +198,12 @@ public:
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER0, JIT_FLAG_TIER0);
+ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_TIER1, JIT_FLAG_TIER1);
+
++#if defined(_TARGET_ARM_)
++
++ FLAGS_EQUAL(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS, JIT_FLAG_RELATIVE_CODE_RELOCS);
++
++#endif // _TARGET_ARM_
++
+ #undef FLAGS_EQUAL
+ }
+
+diff --git a/src/zap/zapinfo.cpp b/src/zap/zapinfo.cpp
+index 40d14ae..60e03af 100644
+--- a/src/zap/zapinfo.cpp
++++ b/src/zap/zapinfo.cpp
+@@ -2481,7 +2481,25 @@ void ZapInfo::recordRelocation(void *location, void *target,
+
+ #if defined(_TARGET_ARM_)
+ case IMAGE_REL_BASED_THUMB_MOV32:
++ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
+ case IMAGE_REL_BASED_THUMB_BRANCH24:
++
++# ifdef _DEBUG
++ {
++ CORJIT_FLAGS jitFlags = m_zapper->m_pOpt->m_compilerFlags;
++
++ if (jitFlags.IsSet(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS))
++ {
++ _ASSERTE(fRelocType == IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL
++ || fRelocType == IMAGE_REL_BASED_THUMB_BRANCH24);
++ }
++ else
++ {
++ _ASSERTE(fRelocType == IMAGE_REL_BASED_THUMB_MOV32
++ || fRelocType == IMAGE_REL_BASED_THUMB_BRANCH24);
++ }
++ }
++# endif // _DEBUG
+ break;
+ #endif
+
+@@ -2584,6 +2602,7 @@ void ZapInfo::recordRelocation(void *location, void *target,
+
+ #if defined(_TARGET_ARM_)
+ case IMAGE_REL_BASED_THUMB_MOV32:
++ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
+ PutThumb2Mov32((UINT16 *)location, targetOffset);
+ break;
+
+diff --git a/src/zap/zapper.cpp b/src/zap/zapper.cpp
+index 4d1330e..6b45bc2 100644
+--- a/src/zap/zapper.cpp
++++ b/src/zap/zapper.cpp
+@@ -278,8 +278,7 @@ ZapperOptions::ZapperOptions() :
+ m_legacyMode(false)
+ ,m_fNoMetaData(s_fNGenNoMetaData)
+ {
+- m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELOC);
+- m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PREJIT);
++ SetCompilerFlags();
+
+ m_zapSet = CLRConfig::GetConfigValue(CLRConfig::EXTERNAL_ZapSet);
+ if (m_zapSet != NULL && wcslen(m_zapSet) > 3)
+@@ -319,6 +318,18 @@ ZapperOptions::~ZapperOptions()
+ delete [] m_repositoryDir;
+ }
+
++void ZapperOptions::SetCompilerFlags(void)
++{
++ m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELOC);
++ m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PREJIT);
++
++#if defined(_TARGET_ARM_)
++# if defined(PLATFORM_UNIX)
++ m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELATIVE_CODE_RELOCS);
++# endif // defined(PLATFORM_UNIX)
++#endif // defined(_TARGET_ARM_)
++}
++
+ /* --------------------------------------------------------------------------- *
+ * Zapper class
+ * --------------------------------------------------------------------------- */
+@@ -370,8 +381,7 @@ Zapper::Zapper(NGenOptions *pOptions, bool fromDllHost)
+ pOptions = &currentVersionOptions;
+
+ zo->m_compilerFlags.Reset();
+- zo->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_RELOC);
+- zo->m_compilerFlags.Set(CORJIT_FLAGS::CORJIT_FLAG_PREJIT);
++ zo->SetCompilerFlags();
+ zo->m_autodebug = true;
+
+ if (pOptions->fDebug)
+diff --git a/src/zap/zaprelocs.cpp b/src/zap/zaprelocs.cpp
+index 04708c2..059d9a5 100644
+--- a/src/zap/zaprelocs.cpp
++++ b/src/zap/zaprelocs.cpp
+@@ -84,6 +84,22 @@ void ZapBaseRelocs::WriteReloc(PVOID pSrc, int offset, ZapNode * pTarget, int ta
+ break;
+ }
+
++ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
++ {
++ TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
++
++ // For details about how the value is calculated, see
++ // description of IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL
++ const UINT32 offsetCorrection = 12;
++
++ UINT32 imm32 = pActualTarget - (pSite + offsetCorrection);
++
++ PutThumb2Mov32((UINT16 *)pLocation, imm32);
++
++ // IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL does not need base reloc entry
++ return;
++ }
++
+ case IMAGE_REL_BASED_THUMB_BRANCH24:
+ {
+ TADDR pSite = (TADDR)m_pImage->GetBaseAddress() + rva;
+@@ -282,6 +298,7 @@ void ZapBlobWithRelocs::Save(ZapWriter * pZapWriter)
+
+ #if defined(_TARGET_ARM_)
+ case IMAGE_REL_BASED_THUMB_MOV32:
++ case IMAGE_REL_BASED_REL_THUMB_MOV32_PCREL:
+ targetOffset = (int)GetThumb2Mov32((UINT16 *)pLocation);
+ break;
+
+--
+2.7.4
+
diff --git a/packaging/0005-Allow-RelativePointer-SetValue-usage-for-non-DAC-bui.patch b/packaging/0005-Allow-RelativePointer-SetValue-usage-for-non-DAC-bui.patch
new file mode 100644
index 0000000000..d7e7b7ecf7
--- /dev/null
+++ b/packaging/0005-Allow-RelativePointer-SetValue-usage-for-non-DAC-bui.patch
@@ -0,0 +1,344 @@
+From fae13b76411ba059de27f68d56b09397bbc81b37 Mon Sep 17 00:00:00 2001
+From: gbalykov <g.balykov@samsung.com>
+Date: Thu, 25 May 2017 01:50:26 +0300
+Subject: [PATCH 05/32] Allow RelativePointer::SetValue usage for non-DAC
+ builds only (#11891)
+
+---
+ src/inc/fixuppointer.h | 76 +++++++++++---------------------------------------
+ src/vm/ceeload.cpp | 5 +++-
+ src/vm/ceeload.h | 5 ++++
+ src/vm/ceeload.inl | 6 ++--
+ src/vm/class.h | 4 +--
+ src/vm/classhash.cpp | 7 +++--
+ src/vm/method.cpp | 5 +++-
+ src/vm/method.hpp | 2 +-
+ src/vm/methodtable.h | 11 +++++---
+ 9 files changed, 48 insertions(+), 73 deletions(-)
+
+diff --git a/src/inc/fixuppointer.h b/src/inc/fixuppointer.h
+index 3467cfe..549023a 100644
+--- a/src/inc/fixuppointer.h
++++ b/src/inc/fixuppointer.h
+@@ -23,9 +23,9 @@
+ // There are several flavors of conversions from/to RelativePointer:
+ // - GetValue/SetValue: The most common version. Assumes that the pointer is not NULL.
+ // - GetValueMaybeNull/SetValueMaybeNull: Pointer can be NULL.
+-// - GetValueAtPtr/SetValueAtPtr: Static version of GetValue/SetValue. It is
++// - GetValueAtPtr: Static version of GetValue. It is
+ // meant to simplify access to arrays of RelativePointers.
+-// - GetValueMaybeNullAtPtr/SetValueMaybeNullAtPtr
++// - GetValueMaybeNullAtPtr
+ template<typename PTR_TYPE>
+ class RelativePointer
+ {
+@@ -112,55 +112,33 @@ public:
+ return dac_cast<DPTR(RelativePointer<PTR_TYPE>)>(base)->GetValueMaybeNull(base);
+ }
+
+- // Set encoded value of the pointer. Assumes that the value is not NULL.
+- void SetValue(TADDR base, PTR_TYPE addr)
+- {
+- LIMITED_METHOD_CONTRACT;
+- PRECONDITION(addr != NULL);
+- m_delta = dac_cast<TADDR>(addr) - base;
+- }
+-
+ #ifndef DACCESS_COMPILE
+ // Set encoded value of the pointer. Assumes that the value is not NULL.
+- // Does not need explicit base and thus can be used in non-DAC builds only.
+ FORCEINLINE void SetValue(PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+- return SetValue((TADDR)this, addr);
+- }
+-#endif
+-
+- // Static version of SetValue. It is meant to simplify access to arrays of pointers.
+- FORCEINLINE static void SetValueAtPtr(TADDR base, PTR_TYPE addr)
+- {
+- LIMITED_METHOD_CONTRACT;
+- dac_cast<DPTR(RelativePointer<PTR_TYPE>)>(base)->SetValue(base, addr);
++ PRECONDITION(addr != NULL);
++ m_delta = (TADDR)addr - (TADDR)this;
+ }
+
+ // Set encoded value of the pointer. The value can be NULL.
+ void SetValueMaybeNull(TADDR base, PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+- if (addr == NULL) m_delta = NULL; else SetValue(base, addr);
++ if (addr == NULL)
++ m_delta = NULL;
++ else
++ m_delta = (TADDR)addr - (TADDR)base;
+ }
+
+-#ifndef DACCESS_COMPILE
+ // Set encoded value of the pointer. The value can be NULL.
+- // Does not need explicit base and thus can be used in non-DAC builds only.
+ FORCEINLINE void SetValueMaybeNull(PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+- return SetValueMaybeNull((TADDR)this, addr);
++ SetValueMaybeNull((TADDR)this, addr);
+ }
+ #endif
+
+- // Static version of SetValueMaybeNull. It is meant to simplify access to arrays of pointers.
+- FORCEINLINE static void SetValueMaybeNullAtPtr(TADDR base, PTR_TYPE addr)
+- {
+- LIMITED_METHOD_CONTRACT;
+- dac_cast<DPTR(RelativePointer<PTR_TYPE>)>(base)->SetValueMaybeNull(base, addr);
+- }
+-
+ #ifndef DACCESS_COMPILE
+ void BitwiseCopyTo(RelativePointer<PTR_TYPE> &dest) const
+ {
+@@ -347,55 +325,33 @@ public:
+ return dac_cast<DPTR(RelativeFixupPointer<PTR_TYPE>)>(base)->GetValueMaybeNull(base);
+ }
+
+- // Set encoded value of the pointer. Assumes that the value is not NULL.
+- void SetValue(TADDR base, PTR_TYPE addr)
+- {
+- LIMITED_METHOD_CONTRACT;
+- PRECONDITION(addr != NULL);
+- m_delta = dac_cast<TADDR>(addr) - base;
+- }
+-
+ #ifndef DACCESS_COMPILE
+ // Set encoded value of the pointer. Assumes that the value is not NULL.
+- // Does not need explicit base and thus can be used in non-DAC builds only.
+ FORCEINLINE void SetValue(PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+- return SetValue((TADDR)this, addr);
+- }
+-#endif
+-
+- // Static version of SetValue. It is meant to simplify access to arrays of pointers.
+- FORCEINLINE static void SetValueAtPtr(TADDR base, PTR_TYPE addr)
+- {
+- LIMITED_METHOD_CONTRACT;
+- dac_cast<DPTR(RelativeFixupPointer<PTR_TYPE>)>(base)->SetValue(base, addr);
++ PRECONDITION(addr != NULL);
++ m_delta = (TADDR)addr - (TADDR)this;
+ }
+
+ // Set encoded value of the pointer. The value can be NULL.
+ void SetValueMaybeNull(TADDR base, PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+- if (addr == NULL) m_delta = NULL; else SetValue(base, addr);
++ if (addr == NULL)
++ m_delta = NULL;
++ else
++ m_delta = (TADDR)addr - (TADDR)base;
+ }
+
+-#ifndef DACCESS_COMPILE
+ // Set encoded value of the pointer. The value can be NULL.
+- // Does not need explicit base and thus can be used in non-DAC builds only.
+ FORCEINLINE void SetValueMaybeNull(PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+- return SetValueMaybeNull((TADDR)this, addr);
++ SetValueMaybeNull((TADDR)this, addr);
+ }
+ #endif
+
+- // Static version of SetValueMaybeNull. It is meant to simplify access to arrays of pointers.
+- FORCEINLINE static void SetValueMaybeNullAtPtr(TADDR base, PTR_TYPE addr)
+- {
+- LIMITED_METHOD_CONTRACT;
+- dac_cast<DPTR(RelativeFixupPointer<PTR_TYPE>)>(base)->SetValueMaybeNull(base, addr);
+- }
+-
+ // Returns the pointer to the indirection cell.
+ PTR_TYPE * GetValuePtr(TADDR base) const
+ {
+diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
+index 6a1eb62..f995343 100644
+--- a/src/vm/ceeload.cpp
++++ b/src/vm/ceeload.cpp
+@@ -13708,7 +13708,10 @@ void LookupMapBase::CreateHotItemList(DataImage *image, CorProfileData *profileD
+ for (DWORD ii = 0; ii < numItems; ii++)
+ {
+ if (itemList[ii].value != NULL)
+- RelativePointer<TADDR>::SetValueMaybeNullAtPtr(dac_cast<TADDR>(&itemList[ii].value), itemList[ii].value);
++ {
++ RelativePointer<TADDR> *pRelPtr = (RelativePointer<TADDR> *)&itemList[ii].value;
++ pRelPtr->SetValueMaybeNull(itemList[ii].value);
++ }
+ }
+
+ if (itemList != NULL)
+diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
+index 2f3fe90..dc21eec 100644
+--- a/src/vm/ceeload.h
++++ b/src/vm/ceeload.h
+@@ -311,7 +311,10 @@ template <typename TYPE>
+ struct LookupMap : LookupMapBase
+ {
+ static TYPE GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supportedFlags);
++
++#ifndef DACCESS_COMPILE
+ static void SetValueAt(PTR_TADDR pValue, TYPE value, TADDR flags);
++#endif // DACCESS_COMPILE
+
+ TYPE GetElement(DWORD rid, TADDR* pFlags);
+ void SetElement(DWORD rid, TYPE value, TADDR flags);
+@@ -368,6 +371,7 @@ public:
+ SetElement(rid, value, flags);
+ }
+
++#ifndef DACCESS_COMPILE
+ void AddFlag(DWORD rid, TADDR flag)
+ {
+ WRAPPER_NO_CONTRACT;
+@@ -388,6 +392,7 @@ public:
+ TYPE existingValue = GetValueAt(pElement, &existingFlags, supportedFlags);
+ SetValueAt(pElement, existingValue, existingFlags | flag);
+ }
++#endif // DACCESS_COMPILE
+
+ //
+ // Try to store an association in a map. Will never throw or fail.
+diff --git a/src/vm/ceeload.inl b/src/vm/ceeload.inl
+index 9184a74..8226dce 100644
+--- a/src/vm/ceeload.inl
++++ b/src/vm/ceeload.inl
+@@ -26,6 +26,8 @@ TYPE LookupMap<TYPE>::GetValueAt(PTR_TADDR pValue, TADDR* pFlags, TADDR supporte
+ return (TYPE)(dac_cast<TADDR>(value) & ~supportedFlags);
+ }
+
++#ifndef DACCESS_COMPILE
++
+ template<typename TYPE>
+ inline
+ void LookupMap<TYPE>::SetValueAt(PTR_TADDR pValue, TYPE value, TADDR flags)
+@@ -34,10 +36,10 @@ void LookupMap<TYPE>::SetValueAt(PTR_TADDR pValue, TYPE value, TADDR flags)
+
+ value = (TYPE)(dac_cast<TADDR>(value) | flags);
+
+- RelativePointer<TYPE>::SetValueAtPtr(dac_cast<TADDR>(pValue), value);
++ RelativePointer<TYPE> *pRelPtr = (RelativePointer<TYPE> *)pValue;
++ pRelPtr->SetValue(value);
+ }
+
+-#ifndef DACCESS_COMPILE
+ //
+ // Specialization of Get/SetValueAt methods to support maps of pointer-sized value types
+ //
+diff --git a/src/vm/class.h b/src/vm/class.h
+index e3ec0ba..6358624 100644
+--- a/src/vm/class.h
++++ b/src/vm/class.h
+@@ -1265,7 +1265,7 @@ public:
+ inline void SetFieldDescList (FieldDesc* pFieldDescList)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pFieldDescList.SetValue(PTR_HOST_MEMBER_TADDR(EEClass, this, m_pFieldDescList), pFieldDescList);
++ m_pFieldDescList.SetValue(pFieldDescList);
+ }
+ #endif // !DACCESS_COMPILE
+
+@@ -1700,7 +1700,7 @@ public:
+ inline void SetChunks (MethodDescChunk* pChunks)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pChunks.SetValueMaybeNull(PTR_HOST_MEMBER_TADDR(EEClass, this, m_pChunks), pChunks);
++ m_pChunks.SetValueMaybeNull(pChunks);
+ }
+ #endif // !DACCESS_COMPILE
+ void AddChunk (MethodDescChunk* pNewChunk);
+diff --git a/src/vm/classhash.cpp b/src/vm/classhash.cpp
+index 408a6e8..2ffc612 100644
+--- a/src/vm/classhash.cpp
++++ b/src/vm/classhash.cpp
+@@ -47,7 +47,7 @@ PTR_VOID EEClassHashEntry::GetData()
+ }
+
+ #ifndef DACCESS_COMPILE
+-void EEClassHashEntry::SetData(PTR_VOID data)
++void EEClassHashEntry::SetData(void *data)
+ {
+ CONTRACTL
+ {
+@@ -60,7 +60,10 @@ void EEClassHashEntry::SetData(PTR_VOID data)
+ // TypeHandles are encoded as a relative pointer rather than a regular pointer to avoid the need for image
+ // fixups (any TypeHandles in this hash are defined in the same module).
+ if (((TADDR)data & EECLASSHASH_TYPEHANDLE_DISCR) == 0)
+- RelativePointer<PTR_VOID>::SetValueMaybeNullAtPtr((TADDR)&m_Data, data);
++ {
++ RelativePointer<void *> *pRelPtr = (RelativePointer<void *> *) &m_Data;
++ pRelPtr->SetValueMaybeNull(data);
++ }
+ else
+ m_Data = data;
+ }
+diff --git a/src/vm/method.cpp b/src/vm/method.cpp
+index 34ae6d9..751ceac 100644
+--- a/src/vm/method.cpp
++++ b/src/vm/method.cpp
+@@ -2493,7 +2493,10 @@ void MethodDesc::Reset()
+ }
+
+ if (HasNativeCodeSlot())
+- NativeCodeSlot::SetValueMaybeNullAtPtr(GetAddrOfNativeCodeSlot(), NULL);
++ {
++ RelativePointer<TADDR> *pRelPtr = (RelativePointer<TADDR> *)GetAddrOfNativeCodeSlot();
++ pRelPtr->SetValueMaybeNull(NULL);
++ }
+ _ASSERTE(!HasNativeCode());
+ }
+
+diff --git a/src/vm/method.hpp b/src/vm/method.hpp
+index 3354e57..4ef6db0 100644
+--- a/src/vm/method.hpp
++++ b/src/vm/method.hpp
+@@ -2063,7 +2063,7 @@ public:
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(m_methodTable.IsNull());
+ _ASSERTE(pMT != NULL);
+- m_methodTable.SetValue(PTR_HOST_MEMBER_TADDR(MethodDescChunk, this, m_methodTable), pMT);
++ m_methodTable.SetValue(pMT);
+ }
+
+ inline void SetSizeAndCount(ULONG sizeOfMethodDescs, COUNT_T methodDescCount)
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index 2ce9f2a..93a9ae2 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -1671,12 +1671,13 @@ public:
+ }
+
+ #ifndef DACCESS_COMPILE
+- inline void SetNonVirtualSlotsArray(PTR_PCODE slots)
++ inline void SetNonVirtualSlotsArray(PCODE *slots)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasNonVirtualSlotsArray());
+-
+- RelativePointer<PTR_PCODE>::SetValueAtPtr(GetNonVirtualSlotsPtr(), slots);
++
++ RelativePointer<PCODE *> *pRelPtr = (RelativePointer<PCODE *> *)GetNonVirtualSlotsPtr();
++ pRelPtr->SetValue(slots);
+ }
+
+ inline void SetHasSingleNonVirtualSlot()
+@@ -2456,7 +2457,9 @@ public:
+ _ASSERTE(HasDispatchMapSlot());
+
+ TADDR pSlot = GetMultipurposeSlotPtr(enum_flag_HasDispatchMapSlot, c_DispatchMapSlotOffsets);
+- RelativePointer<PTR_DispatchMap>::SetValueAtPtr(pSlot, pDispatchMap);
++
++ RelativePointer<DispatchMap *> *pRelPtr = (RelativePointer<DispatchMap *> *)pSlot;
++ pRelPtr->SetValue(pDispatchMap);
+ }
+ #endif // !DACCESS_COMPILE
+
+--
+2.7.4
+
diff --git a/packaging/0006-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch b/packaging/0006-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch
new file mode 100644
index 0000000000..f7511ef78a
--- /dev/null
+++ b/packaging/0006-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch
@@ -0,0 +1,1017 @@
+From 38dd83da084cd357b9bce97a2a096b55232b57fc Mon Sep 17 00:00:00 2001
+From: gbalykov <g.balykov@samsung.com>
+Date: Wed, 31 May 2017 04:25:04 +0300
+Subject: [PATCH 06/32] Remove relocations from SECTION_MethodDesc for ngened
+ images (#11394)
+
+---
+ src/debug/daccess/nidump.cpp | 51 +++++++++++------------
+ src/vm/dllimport.cpp | 4 +-
+ src/vm/dynamicmethod.cpp | 14 +++----
+ src/vm/genmeth.cpp | 4 +-
+ src/vm/ilstubcache.cpp | 12 +++---
+ src/vm/method.cpp | 33 +++++++--------
+ src/vm/method.hpp | 94 ++++++++++++++++++++++++++++---------------
+ src/vm/methodimpl.cpp | 69 ++++++++++++++++++-------------
+ src/vm/methodimpl.h | 45 +++++++++++++--------
+ src/vm/methodtablebuilder.cpp | 24 +++++------
+ src/vm/methodtablebuilder.h | 8 ++--
+ 11 files changed, 205 insertions(+), 153 deletions(-)
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index 77c05b5..5e8302f 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -7829,20 +7829,20 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ MethodImpl, METHODDESCS);
+
+ }
+- _ASSERTE(impl->pImplementedMD == NULL
+- || isInRange(PTR_TO_TADDR(impl->pImplementedMD)));
+- if ((impl->pImplementedMD != NULL) &&
+- isInRange(PTR_TO_TADDR(impl->pImplementedMD)))
++ _ASSERTE(impl->pImplementedMD.IsNull()
++ || isInRange(PTR_TO_TADDR(impl->GetImpMDsNonNull())));
++ if (!impl->pImplementedMD.IsNull() &&
++ isInRange(PTR_TO_TADDR(impl->GetImpMDsNonNull())))
+ {
+ DisplayWriteFieldAddress( pImplementedMD,
+- DataPtrToDisplay(dac_cast<TADDR>(impl->pImplementedMD)),
+- numSlots * sizeof(MethodDesc*),
++ DataPtrToDisplay(dac_cast<TADDR>(impl->GetImpMDsNonNull())),
++ numSlots * sizeof(RelativePointer <MethodDesc*>),
+ MethodImpl, METHODDESCS );
+ }
+ else
+ {
+ DisplayWriteFieldPointer( pImplementedMD,
+- DataPtrToDisplay(dac_cast<TADDR>(impl->pImplementedMD)),
++ DataPtrToDisplay(dac_cast<TADDR>(impl->GetImpMDs())),
+ MethodImpl, METHODDESCS );
+ }
+ DisplayEndVStructure( METHODDESCS );
+@@ -7852,19 +7852,19 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ DisplayStartVStructure( "StoredSigMethodDesc", METHODDESCS );
+ PTR_StoredSigMethodDesc ssmd(md);
+ //display signature information.
+- if( isInRange(ssmd->m_pSig) )
++ if( isInRange(ssmd->GetSigRVA()) )
+ {
+- DisplayWriteFieldAddress(m_pSig, DataPtrToDisplay(ssmd->m_pSig),
++ DisplayWriteFieldAddress(m_pSig, DataPtrToDisplay(ssmd->GetSigRVA()),
+ ssmd->m_cSig, StoredSigMethodDesc,
+ METHODDESCS);
+ }
+ else
+ {
+- DisplayWriteFieldPointer(m_pSig, DataPtrToDisplay(ssmd->m_pSig),
++ DisplayWriteFieldPointer(m_pSig, DataPtrToDisplay(ssmd->GetSigRVA()),
+ StoredSigMethodDesc, METHODDESCS);
+
+ }
+- CoverageRead(TO_TADDR(ssmd->m_pSig), ssmd->m_cSig);
++ CoverageRead(TO_TADDR(ssmd->GetSigRVA()), ssmd->m_cSig);
+ DisplayWriteFieldInt( m_cSig, ssmd->m_cSig,
+ StoredSigMethodDesc, METHODDESCS );
+ #ifdef _WIN64
+@@ -7880,10 +7880,10 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ {
+ PTR_DynamicMethodDesc dmd(md);
+ DisplayStartVStructure( "DynamicMethodDesc", METHODDESCS );
+- WriteFieldStr( m_pszMethodName, PTR_BYTE(dmd->m_pszMethodName),
++ WriteFieldStr( m_pszMethodName, PTR_BYTE(dmd->GetMethodName()),
+ DynamicMethodDesc, METHODDESCS );
+ if( !CHECK_OPT(METHODDESCS) )
+- CoverageReadString( PTR_TO_TADDR(dmd->m_pszMethodName) );
++ CoverageReadString( PTR_TO_TADDR(dmd->GetMethodName()) );
+ DisplayWriteFieldPointer( m_pResolver,
+ DPtrToPreferredAddr(dmd->m_pResolver),
+ DynamicMethodDesc, METHODDESCS );
+@@ -7927,10 +7927,10 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ METHODDESCS );
+
+ WriteFieldStr( m_pszEntrypointName,
+- PTR_BYTE(TO_TADDR(nd->m_pszEntrypointName)),
++ PTR_BYTE(dac_cast<TADDR>(ndmd->GetEntrypointName())),
+ NDirectMethodDesc::temp1, METHODDESCS );
+ if( !CHECK_OPT(METHODDESCS) )
+- CoverageReadString(TO_TADDR(nd->m_pszEntrypointName));
++ CoverageReadString(dac_cast<TADDR>(ndmd->GetEntrypointName()));
+ if (md->IsQCall())
+ {
+ DisplayWriteFieldInt( m_dwECallID,
+@@ -7941,11 +7941,11 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ else
+ {
+ WriteFieldStr( m_pszLibName,
+- PTR_BYTE(TO_TADDR(nd->m_pszLibName)),
++ PTR_BYTE(dac_cast<TADDR>(ndmd->GetLibNameRaw())),
+ NDirectMethodDesc::temp1, METHODDESCS );
+ }
+ if( !CHECK_OPT(METHODDESCS) )
+- CoverageReadString( TO_TADDR(nd->m_pszLibName) );
++ CoverageReadString( dac_cast<TADDR>(ndmd->GetLibNameRaw()) );
+
+ PTR_NDirectWriteableData wnd( nd->m_pWriteableData );
+ DisplayStartStructureWithOffset( m_pWriteableData,
+@@ -7959,12 +7959,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ CoverageRead( PTR_TO_TADDR(wnd), sizeof(*wnd) );
+ DisplayEndStructure( METHODDESCS ); //m_pWriteableData
+
+-
+-#ifdef HAS_NDIRECT_IMPORT_PRECODE
+- PTR_NDirectImportThunkGlue glue(nd->m_pImportThunkGlue);
+-#else
+- PTR_NDirectImportThunkGlue glue(PTR_HOST_MEMBER_TADDR(NDirectMethodDesc::temp1, nd, m_ImportThunkGlue));
+-#endif
++ PTR_NDirectImportThunkGlue glue(ndmd->GetNDirectImportThunkGlue());
+
+ #ifdef HAS_NDIRECT_IMPORT_PRECODE
+ if (glue == NULL)
+@@ -8065,7 +8060,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ & InstantiatedMethodDesc::KindMask;
+ if( kind == InstantiatedMethodDesc::SharedMethodInstantiation )
+ {
+- PTR_DictionaryLayout layout(TO_TADDR(imd->m_pDictLayout));
++ PTR_DictionaryLayout layout(dac_cast<TADDR>(imd->GetDictLayoutRaw()));
+ IF_OPT(METHODDESCS)
+ {
+ WriteFieldDictionaryLayout( "m_pDictLayout",
+@@ -8089,7 +8084,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ else if( kind ==
+ InstantiatedMethodDesc::WrapperStubWithInstantiations )
+ {
+- PTR_MethodDesc wimd(imd->m_pWrappedMethodDesc.GetValue());
++ PTR_MethodDesc wimd(imd->IMD_GetWrappedMethodDesc());
+ if( wimd == NULL || !DoWriteFieldAsFixup( "m_pWrappedMethodDesc",
+ offsetof(InstantiatedMethodDesc, m_pWrappedMethodDesc),
+ fieldsize(InstantiatedMethodDesc, m_pWrappedMethodDesc),
+@@ -8101,7 +8096,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ }
+ else
+ {
+- _ASSERTE(imd->m_pDictLayout == NULL);
++ _ASSERTE(imd->m_pDictLayout.IsNull());
+ DisplayWriteFieldPointer( m_pDictLayout, NULL,
+ InstantiatedMethodDesc,
+ METHODDESCS );
+@@ -8117,7 +8112,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ else if( kind == InstantiatedMethodDesc::WrapperStubWithInstantiations )
+ {
+ PTR_InstantiatedMethodDesc wrapped =
+- PTR_InstantiatedMethodDesc(imd->m_pWrappedMethodDesc.GetValue());
++ PTR_InstantiatedMethodDesc(imd->IMD_GetWrappedMethodDesc());
+ if( CORCOMPILE_IS_POINTER_TAGGED(PTR_TO_TADDR(wrapped)) )
+ {
+ /* XXX Mon 03/27/2006
+@@ -8131,7 +8126,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ else
+ {
+ PTR_DictionaryLayout layout(wrapped->IsSharedByGenericMethodInstantiations()
+- ? TO_TADDR(wrapped->m_pDictLayout) : NULL );
++ ? dac_cast<TADDR>(wrapped->GetDictLayoutRaw()) : NULL );
+ dictSize = DictionaryLayout::GetFirstDictionaryBucketSize(imd->GetNumGenericMethodArgs(),
+ layout);
+ }
+diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp
+index b58ac56..cf546cd 100644
+--- a/src/vm/dllimport.cpp
++++ b/src/vm/dllimport.cpp
+@@ -4368,8 +4368,8 @@ void NDirect::PopulateNDirectMethodDesc(NDirectMethodDesc* pNMD, PInvokeStaticSi
+ else
+ {
+ EnsureWritablePages(&pNMD->ndirect);
+- pNMD->ndirect.m_pszLibName = szLibName;
+- pNMD->ndirect.m_pszEntrypointName = szEntryPointName;
++ pNMD->ndirect.m_pszLibName.SetValueMaybeNull(szLibName);
++ pNMD->ndirect.m_pszEntrypointName.SetValueMaybeNull(szEntryPointName);
+ }
+
+ #ifdef _TARGET_X86_
+diff --git a/src/vm/dynamicmethod.cpp b/src/vm/dynamicmethod.cpp
+index 2a61f97..43f4c69 100644
+--- a/src/vm/dynamicmethod.cpp
++++ b/src/vm/dynamicmethod.cpp
+@@ -272,7 +272,7 @@ DynamicMethodDesc* DynamicMethodTable::GetDynamicMethod(BYTE *psig, DWORD sigSiz
+ // the store sig part of the method desc
+ pNewMD->SetStoredMethodSig((PCCOR_SIGNATURE)psig, sigSize);
+ // the dynamic part of the method desc
+- pNewMD->m_pszMethodName = name;
++ pNewMD->m_pszMethodName.SetValueMaybeNull(name);
+
+ pNewMD->m_dwExtendedFlags = mdPublic | mdStatic | DynamicMethodDesc::nomdLCGMethod;
+
+@@ -884,16 +884,16 @@ void DynamicMethodDesc::Destroy(BOOL fDomainUnload)
+ LoaderAllocator *pLoaderAllocator = GetLoaderAllocatorForCode();
+
+ LOG((LF_BCL, LL_INFO1000, "Level3 - Destroying DynamicMethod {0x%p}\n", this));
+- if (m_pSig)
++ if (!m_pSig.IsNull())
+ {
+- delete[] (BYTE*)m_pSig;
+- m_pSig = NULL;
++ delete[] (BYTE*)m_pSig.GetValue();
++ m_pSig.SetValueMaybeNull(NULL);
+ }
+ m_cSig = 0;
+- if (m_pszMethodName)
++ if (!m_pszMethodName.IsNull())
+ {
+- delete[] m_pszMethodName;
+- m_pszMethodName = NULL;
++ delete[] m_pszMethodName.GetValue();
++ m_pszMethodName.SetValueMaybeNull(NULL);
+ }
+
+ GetLCGMethodResolver()->Destroy(fDomainUnload);
+diff --git a/src/vm/genmeth.cpp b/src/vm/genmeth.cpp
+index c50f806..d5b435b 100644
+--- a/src/vm/genmeth.cpp
++++ b/src/vm/genmeth.cpp
+@@ -465,7 +465,7 @@ InstantiatedMethodDesc::NewInstantiatedMethodDesc(MethodTable *pExactMT,
+ {
+ if (pWrappedMD->IsSharedByGenericMethodInstantiations())
+ {
+- pDL = pWrappedMD->AsInstantiatedMethodDesc()->m_pDictLayout;
++ pDL = pWrappedMD->AsInstantiatedMethodDesc()->GetDictLayoutRaw();
+ }
+ }
+ else if (getWrappedCode)
+@@ -1576,7 +1576,7 @@ void InstantiatedMethodDesc::SetupSharedMethodInstantiation(DWORD numGenericArgs
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ m_wNumGenericArgs = static_cast<WORD>(numGenericArgs);
+
+- m_pDictLayout = pDL;
++ m_pDictLayout.SetValueMaybeNull(pDL);
+
+
+ _ASSERTE(IMD_IsSharedByGenericMethodInstantiations());
+diff --git a/src/vm/ilstubcache.cpp b/src/vm/ilstubcache.cpp
+index ff6bdc0..3651715 100644
+--- a/src/vm/ilstubcache.cpp
++++ b/src/vm/ilstubcache.cpp
+@@ -167,7 +167,7 @@ MethodDesc* ILStubCache::CreateNewMethodDesc(LoaderHeap* pCreationHeap, MethodTa
+ pMD->SetMemberDef(0);
+ pMD->SetSlot(MethodTable::NO_SLOT); // we can't ever use the slot for dynamic methods
+ // the no metadata part of the method desc
+- pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB";
++ pMD->m_pszMethodName.SetValue((PTR_CUTF8)"IL_STUB");
+ pMD->m_dwExtendedFlags = mdPublic | DynamicMethodDesc::nomdILStub;
+
+ pMD->SetTemporaryEntryPoint(pMT->GetLoaderAllocator(), pamTracker);
+@@ -292,11 +292,11 @@ MethodDesc* ILStubCache::CreateNewMethodDesc(LoaderHeap* pCreationHeap, MethodTa
+ {
+ switch(dwStubFlags)
+ {
+- case ILSTUB_ARRAYOP_GET: pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB_Array_Get";
++ case ILSTUB_ARRAYOP_GET: pMD->m_pszMethodName.SetValue((PTR_CUTF8)"IL_STUB_Array_Get");
+ break;
+- case ILSTUB_ARRAYOP_SET: pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB_Array_Set";
++ case ILSTUB_ARRAYOP_SET: pMD->m_pszMethodName.SetValue((PTR_CUTF8)"IL_STUB_Array_Set");
+ break;
+- case ILSTUB_ARRAYOP_ADDRESS: pMD->m_pszMethodName = (PTR_CUTF8)"IL_STUB_Array_Address";
++ case ILSTUB_ARRAYOP_ADDRESS: pMD->m_pszMethodName.SetValue((PTR_CUTF8)"IL_STUB_Array_Address");
+ break;
+ default: _ASSERTE(!"Unknown array il stub");
+ }
+@@ -304,12 +304,12 @@ MethodDesc* ILStubCache::CreateNewMethodDesc(LoaderHeap* pCreationHeap, MethodTa
+ else
+ #endif
+ {
+- pMD->m_pszMethodName = pMD->GetILStubResolver()->GetStubMethodName();
++ pMD->m_pszMethodName.SetValue(pMD->GetILStubResolver()->GetStubMethodName());
+ }
+
+
+ #ifdef _DEBUG
+- pMD->m_pszDebugMethodName = pMD->m_pszMethodName;
++ pMD->m_pszDebugMethodName = RelativePointer<PTR_CUTF8>::GetValueAtPtr(PTR_HOST_MEMBER_TADDR(DynamicMethodDesc, pMD, m_pszMethodName));
+ pMD->m_pszDebugClassName = ILStubResolver::GetStubClassName(pMD); // must be called after type is set
+ pMD->m_pszDebugMethodSignature = FormatSig(pMD, pCreationHeap, pamTracker);
+ pMD->m_pDebugMethodTable.SetValue(pMT);
+diff --git a/src/vm/method.cpp b/src/vm/method.cpp
+index 751ceac..c8c1b9f 100644
+--- a/src/vm/method.cpp
++++ b/src/vm/method.cpp
+@@ -2919,7 +2919,7 @@ void MethodDesc::Save(DataImage *image)
+
+ if (pNewSMD->HasStoredMethodSig())
+ {
+- if (!image->IsStored((void *) pNewSMD->m_pSig))
++ if (!image->IsStored((void *) pNewSMD->m_pSig.GetValueMaybeNull()))
+ {
+ // Store signatures that doesn't need restore into a read only section.
+ DataImage::ItemKind sigItemKind = DataImage::ITEM_STORED_METHOD_SIG_READONLY;
+@@ -2935,7 +2935,7 @@ void MethodDesc::Save(DataImage *image)
+ }
+
+ if (FixupSignatureContainingInternalTypes(image,
+- (PCCOR_SIGNATURE)pNewSMD->m_pSig,
++ (PCCOR_SIGNATURE) pNewSMD->m_pSig.GetValueMaybeNull(),
+ pNewSMD->m_cSig,
+ true /*checkOnly if we will need to restore the signature without doing fixup*/))
+ {
+@@ -2943,7 +2943,7 @@ void MethodDesc::Save(DataImage *image)
+ }
+ }
+
+- image->StoreInternedStructure((void *) pNewSMD->m_pSig,
++ image->StoreInternedStructure((void *) pNewSMD->m_pSig.GetValueMaybeNull(),
+ pNewSMD->m_cSig,
+ sigItemKind,
+ 1);
+@@ -2964,9 +2964,9 @@ void MethodDesc::Save(DataImage *image)
+ if (HasMethodInstantiation())
+ {
+ InstantiatedMethodDesc* pIMD = AsInstantiatedMethodDesc();
+- if (pIMD->IMD_IsSharedByGenericMethodInstantiations() && pIMD->m_pDictLayout != NULL)
++ if (pIMD->IMD_IsSharedByGenericMethodInstantiations() && !pIMD->m_pDictLayout.IsNull())
+ {
+- pIMD->m_pDictLayout->Save(image);
++ pIMD->m_pDictLayout.GetValue()->Save(image);
+ }
+ }
+ if (IsNDirect())
+@@ -3042,9 +3042,10 @@ void MethodDesc::Save(DataImage *image)
+ if (IsDynamicMethod())
+ {
+ DynamicMethodDesc *pDynMeth = AsDynamicMethodDesc();
+- if (pDynMeth->m_pszMethodName && !image->IsStored(pDynMeth->m_pszMethodName))
+- image->StoreStructure((void *) pDynMeth->m_pszMethodName,
+- (ULONG)(strlen(pDynMeth->m_pszMethodName) + 1),
++ if (!pDynMeth->m_pszMethodName.IsNull()
++ && !image->IsStored(pDynMeth->m_pszMethodName.GetValue()))
++ image->StoreStructure((void *) pDynMeth->m_pszMethodName.GetValue(),
++ (ULONG)(strlen(pDynMeth->m_pszMethodName.GetValue()) + 1),
+ DataImage::ITEM_STORED_METHOD_NAME,
+ 1);
+ }
+@@ -3629,7 +3630,7 @@ MethodDesc::Fixup(
+ if (IsDynamicMethod())
+ {
+ image->ZeroPointerField(this, offsetof(DynamicMethodDesc, m_pResolver));
+- image->FixupPointerField(this, offsetof(DynamicMethodDesc, m_pszMethodName));
++ image->FixupRelativePointerField(this, offsetof(DynamicMethodDesc, m_pszMethodName));
+ }
+
+ if (GetClassification() == mcInstantiated)
+@@ -3645,8 +3646,8 @@ MethodDesc::Fixup(
+ {
+ if (pIMD->IMD_IsSharedByGenericMethodInstantiations())
+ {
+- pIMD->m_pDictLayout->Fixup(image, TRUE);
+- image->FixupPointerField(this, offsetof(InstantiatedMethodDesc, m_pDictLayout));
++ pIMD->m_pDictLayout.GetValue()->Fixup(image, TRUE);
++ image->FixupRelativePointerField(this, offsetof(InstantiatedMethodDesc, m_pDictLayout));
+ }
+ }
+
+@@ -3754,7 +3755,7 @@ MethodDesc::Fixup(
+ if (!pNMD->MarshalingRequired())
+ {
+ // import thunk is only needed if the P/Invoke is inlinable
+- image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pImportThunkGlue));
++ image->FixupRelativePointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pImportThunkGlue));
+ ((Precode*)pImportThunkGlue)->Fixup(image, this);
+ }
+ else
+@@ -3767,8 +3768,8 @@ MethodDesc::Fixup(
+
+ if (!IsQCall())
+ {
+- image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pszLibName));
+- image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pszEntrypointName));
++ image->FixupRelativePointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pszLibName));
++ image->FixupRelativePointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pszEntrypointName));
+ }
+
+ if (image->IsStored(pNMD->ndirect.m_pStubMD.GetValueMaybeNull()))
+@@ -3779,7 +3780,7 @@ MethodDesc::Fixup(
+
+ if (HasStoredSig())
+ {
+- image->FixupPointerField(this, offsetof(StoredSigMethodDesc, m_pSig));
++ image->FixupRelativePointerField(this, offsetof(StoredSigMethodDesc, m_pSig));
+
+ // The DynamicMethodDescs used for IL stubs may have a signature that refers to
+ // runtime types using ELEMENT_TYPE_INTERNAL. We need to fixup these types here.
+@@ -5512,7 +5513,7 @@ StoredSigMethodDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ {
+ SUPPORTS_DAC;
+ // 'this' already done, see below.
+- DacEnumMemoryRegion(m_pSig, m_cSig);
++ DacEnumMemoryRegion(GetSigRVA(), m_cSig);
+ }
+
+ //*******************************************************************************
+diff --git a/src/vm/method.hpp b/src/vm/method.hpp
+index 4ef6db0..41695df 100644
+--- a/src/vm/method.hpp
++++ b/src/vm/method.hpp
+@@ -2215,7 +2215,7 @@ class StoredSigMethodDesc : public MethodDesc
+ // Put the sig RVA in here - this allows us to avoid
+ // touching the method desc table when mscorlib is prejitted.
+
+- TADDR m_pSig;
++ RelativePointer<TADDR> m_pSig;
+ DWORD m_cSig;
+ #ifdef _WIN64
+ // m_dwExtendedFlags is not used by StoredSigMethodDesc itself.
+@@ -2224,10 +2224,16 @@ class StoredSigMethodDesc : public MethodDesc
+ DWORD m_dwExtendedFlags;
+ #endif
+
++ TADDR GetSigRVA()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return RelativePointer<TADDR>::GetValueMaybeNullAtPtr(PTR_HOST_MEMBER_TADDR(StoredSigMethodDesc, this, m_pSig));
++ }
++
+ bool HasStoredMethodSig(void)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return m_pSig != 0;
++ return !m_pSig.IsNull();
+ }
+ PCCOR_SIGNATURE GetStoredMethodSig(DWORD* sigLen = NULL)
+ {
+@@ -2238,16 +2244,16 @@ class StoredSigMethodDesc : public MethodDesc
+ }
+ #ifdef DACCESS_COMPILE
+ return (PCCOR_SIGNATURE)
+- DacInstantiateTypeByAddress(m_pSig, m_cSig, true);
++ DacInstantiateTypeByAddress(GetSigRVA(), m_cSig, true);
+ #else // !DACCESS_COMPILE
+ g_IBCLogger.LogNDirectCodeAccess(this);
+- return (PCCOR_SIGNATURE)m_pSig;
++ return (PCCOR_SIGNATURE) m_pSig.GetValueMaybeNull();
+ #endif // !DACCESS_COMPILE
+ }
+ void SetStoredMethodSig(PCCOR_SIGNATURE sig, DWORD sigBytes)
+ {
+ #ifndef DACCESS_COMPILE
+- m_pSig = (TADDR)sig;
++ m_pSig.SetValueMaybeNull((TADDR)sig);
+ m_cSig = sigBytes;
+ #endif // !DACCESS_COMPILE
+ }
+@@ -2307,7 +2313,7 @@ class DynamicMethodDesc : public StoredSigMethodDesc
+ #endif
+
+ protected:
+- PTR_CUTF8 m_pszMethodName;
++ RelativePointer<PTR_CUTF8> m_pszMethodName;
+ PTR_DynamicResolver m_pResolver;
+
+ #ifndef _WIN64
+@@ -2348,7 +2354,11 @@ public:
+ inline PTR_LCGMethodResolver GetLCGMethodResolver();
+ inline PTR_ILStubResolver GetILStubResolver();
+
+- PTR_CUTF8 GetMethodName() { LIMITED_METHOD_DAC_CONTRACT; return m_pszMethodName; }
++ PTR_CUTF8 GetMethodName()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return RelativePointer<PTR_CUTF8>::GetValueMaybeNullAtPtr(PTR_HOST_MEMBER_TADDR(DynamicMethodDesc, this, m_pszMethodName));
++ }
+
+ WORD GetAttrs()
+ {
+@@ -2571,11 +2581,11 @@ public:
+ LPVOID m_pNativeNDirectTarget;
+
+ // Information about the entrypoint
+- LPCUTF8 m_pszEntrypointName;
++ RelativePointer<PTR_CUTF8> m_pszEntrypointName;
+
+ union
+ {
+- LPCUTF8 m_pszLibName;
++ RelativePointer<PTR_CUTF8> m_pszLibName;
+ DWORD m_dwECallID; // ECallID for QCalls
+ };
+
+@@ -2583,7 +2593,7 @@ public:
+ PTR_NDirectWriteableData m_pWriteableData;
+
+ #ifdef HAS_NDIRECT_IMPORT_PRECODE
+- PTR_NDirectImportThunkGlue m_pImportThunkGlue;
++ RelativePointer<PTR_NDirectImportThunkGlue> m_pImportThunkGlue;
+ #else // HAS_NDIRECT_IMPORT_PRECODE
+ NDirectImportThunkGlue m_ImportThunkGlue;
+ #endif // HAS_NDIRECT_IMPORT_PRECODE
+@@ -2706,18 +2716,27 @@ public:
+ ndirect.m_dwECallID = dwID;
+ }
+
++ PTR_CUTF8 GetLibNameRaw()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return RelativePointer<PTR_CUTF8>::GetValueMaybeNullAtPtr(PTR_HOST_MEMBER_TADDR(NDirectMethodDesc, this, ndirect.m_pszLibName));
++ }
++
++#ifndef DACCESS_COMPILE
+ LPCUTF8 GetLibName() const
+ {
+ LIMITED_METHOD_CONTRACT;
+
+- return IsQCall() ? "QCall" : ndirect.m_pszLibName;
++ return IsQCall() ? "QCall" : ndirect.m_pszLibName.GetValueMaybeNull();
+ }
++#endif // !DACCESS_COMPILE
+
+- LPCUTF8 GetEntrypointName() const
++ PTR_CUTF8 GetEntrypointName() const
+ {
+- LIMITED_METHOD_CONTRACT;
++ LIMITED_METHOD_DAC_CONTRACT;
+
+- return ndirect.m_pszEntrypointName;
++ return RelativePointer<PTR_CUTF8>::GetValueMaybeNullAtPtr(PTR_HOST_MEMBER_TADDR(NDirectMethodDesc, this, ndirect.m_pszEntrypointName));
+ }
+
+ BOOL IsVarArgs() const
+@@ -2800,14 +2819,16 @@ public:
+ return ndirect.m_pWriteableData;
+ }
+
+- NDirectImportThunkGlue* GetNDirectImportThunkGlue()
++ PTR_NDirectImportThunkGlue GetNDirectImportThunkGlue()
+ {
+- LIMITED_METHOD_CONTRACT;
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ TADDR base = PTR_HOST_MEMBER_TADDR(NDirectMethodDesc, this, ndirect.m_pImportThunkGlue);
+
+ #ifdef HAS_NDIRECT_IMPORT_PRECODE
+- return ndirect.m_pImportThunkGlue;
++ return RelativePointer<PTR_NDirectImportThunkGlue>::GetValueAtPtr(base);
+ #else
+- return &ndirect.m_ImportThunkGlue;
++ return dac_cast<PTR_NDirectImportThunkGlue>(base);
+ #endif
+ }
+
+@@ -3281,28 +3302,37 @@ public:
+ }
+ #endif // FEATURE_COMINTEROP
+
++ PTR_DictionaryLayout GetDictLayoutRaw()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return RelativePointer<PTR_DictionaryLayout>::GetValueMaybeNullAtPtr(PTR_HOST_MEMBER_TADDR(InstantiatedMethodDesc, this, m_pDictLayout));
++ }
++
++ PTR_MethodDesc IMD_GetWrappedMethodDesc()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ _ASSERTE(IMD_IsWrapperStubWithInstantiations());
++ return RelativePointer<PTR_MethodDesc>::GetValueAtPtr(PTR_HOST_MEMBER_TADDR(InstantiatedMethodDesc, this, m_pWrappedMethodDesc));
++ }
++
++#ifndef DACCESS_COMPILE
+ // Get the dictionary layout, if there is one
+ DictionaryLayout* IMD_GetDictionaryLayout()
+ {
+ WRAPPER_NO_CONTRACT;
+ if (IMD_IsWrapperStubWithInstantiations() && IMD_HasMethodInstantiation())
+- return IMD_GetWrappedMethodDesc()->AsInstantiatedMethodDesc()->m_pDictLayout;
++ {
++ InstantiatedMethodDesc* pIMD = IMD_GetWrappedMethodDesc()->AsInstantiatedMethodDesc();
++ return pIMD->m_pDictLayout.GetValueMaybeNull();
++ }
+ else
+ if (IMD_IsSharedByGenericMethodInstantiations())
+- return m_pDictLayout;
++ return m_pDictLayout.GetValueMaybeNull();
+ else
+ return NULL;
+ }
+-
+- MethodDesc* IMD_GetWrappedMethodDesc()
+- {
+- LIMITED_METHOD_CONTRACT;
+-
+- _ASSERTE(IMD_IsWrapperStubWithInstantiations());
+- return m_pWrappedMethodDesc.GetValue();
+- }
+-
+-
++#endif // !DACCESS_COMPILE
+
+ // Setup the IMD as shared code
+ void SetupSharedMethodInstantiation(DWORD numGenericArgs, TypeHandle *pPerInstInfo, DictionaryLayout *pDL);
+@@ -3349,9 +3379,9 @@ private:
+
+ friend class MethodDesc; // this fields are currently accessed by MethodDesc::Save/Restore etc.
+ union {
+- DictionaryLayout * m_pDictLayout; //SharedMethodInstantiation
++ RelativePointer<PTR_DictionaryLayout> m_pDictLayout; //SharedMethodInstantiation
+
+- FixupPointer<PTR_MethodDesc> m_pWrappedMethodDesc; // For WrapperStubWithInstantiations
++ RelativeFixupPointer<PTR_MethodDesc> m_pWrappedMethodDesc; // For WrapperStubWithInstantiations
+ };
+
+ public: // <TODO>make private: JITinterface.cpp accesses through this </TODO>
+diff --git a/src/vm/methodimpl.cpp b/src/vm/methodimpl.cpp
+index 1779c2d..c685e1c 100644
+--- a/src/vm/methodimpl.cpp
++++ b/src/vm/methodimpl.cpp
+@@ -72,7 +72,10 @@ PTR_MethodDesc MethodImpl::FindMethodDesc(DWORD slot, PTR_MethodDesc defaultRetu
+ return defaultReturn;
+ }
+
+- PTR_MethodDesc result = pImplementedMD[slotIndex]; // The method descs are not offset by one
++ DPTR(RelativePointer<PTR_MethodDesc>) pRelPtrForSlot = GetImpMDsNonNull();
++ // The method descs are not offset by one
++ TADDR base = dac_cast<TADDR>(pRelPtrForSlot) + slotIndex * sizeof(RelativePointer<MethodDesc *>);
++ PTR_MethodDesc result = RelativePointer<PTR_MethodDesc>::GetValueMaybeNullAtPtr(base);
+
+ // Prejitted images may leave NULL in this table if
+ // the methoddesc is declared in another module.
+@@ -98,13 +101,13 @@ MethodDesc *MethodImpl::RestoreSlot(DWORD index, MethodTable *pMT)
+ NOTHROW;
+ GC_NOTRIGGER;
+ FORBID_FAULT;
+- PRECONDITION(CheckPointer(pdwSlots));
++ PRECONDITION(!pdwSlots.IsNull());
+ }
+ CONTRACTL_END
+
+ MethodDesc *result;
+
+- PREFIX_ASSUME(pdwSlots != NULL);
++ PREFIX_ASSUME(!pdwSlots.IsNull());
+ DWORD slot = GetSlots()[index];
+
+ // Since the overridden method is in a different module, we
+@@ -126,8 +129,9 @@ MethodDesc *MethodImpl::RestoreSlot(DWORD index, MethodTable *pMT)
+ _ASSERTE(result != NULL);
+
+ // Don't worry about races since we would all be setting the same result
+- if (EnsureWritableExecutablePagesNoThrow(&pImplementedMD[index], sizeof(pImplementedMD[index])))
+- pImplementedMD[index] = result;
++ if (EnsureWritableExecutablePagesNoThrow(&pImplementedMD.GetValue()[index],
++ sizeof(pImplementedMD.GetValue()[index])))
++ pImplementedMD.GetValue()[index].SetValue(result);
+
+ return result;
+ }
+@@ -139,7 +143,7 @@ void MethodImpl::SetSize(LoaderHeap *pHeap, AllocMemTracker *pamTracker, DWORD s
+ THROWS;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+- PRECONDITION(pdwSlots==NULL && pImplementedMD==NULL);
++ PRECONDITION(pdwSlots.GetValueMaybeNull()==NULL && pImplementedMD.GetValueMaybeNull()==NULL);
+ INJECT_FAULT(ThrowOutOfMemory());
+ } CONTRACTL_END;
+
+@@ -149,7 +153,7 @@ void MethodImpl::SetSize(LoaderHeap *pHeap, AllocMemTracker *pamTracker, DWORD s
+ S_SIZE_T(size) * S_SIZE_T(sizeof(DWORD)); // DWORD each for the slot numbers
+
+ // MethodDesc* for each of the implemented methods
+- S_SIZE_T cbMethodDescs = S_SIZE_T(size) * S_SIZE_T(sizeof(MethodDesc *));
++ S_SIZE_T cbMethodDescs = S_SIZE_T(size) * S_SIZE_T(sizeof(RelativePointer<MethodDesc *>));
+
+ // Need to align-up the slot entries so that the MethodDesc* array starts on a pointer boundary.
+ cbCountAndSlots.AlignUp(sizeof(MethodDesc*));
+@@ -161,29 +165,36 @@ void MethodImpl::SetSize(LoaderHeap *pHeap, AllocMemTracker *pamTracker, DWORD s
+ LPBYTE pAllocData = (BYTE*)pamTracker->Track(pHeap->AllocMem(cbTotal));
+
+ // Set the count and slot array
+- pdwSlots = (DWORD*)pAllocData;
++ pdwSlots.SetValue((DWORD*)pAllocData);
+
+ // Set the MethodDesc* array. Make sure to adjust for alignment.
+- pImplementedMD = (MethodDesc**)ALIGN_UP(pAllocData + cbCountAndSlots.Value(), sizeof(MethodDesc*));
++ pImplementedMD.SetValue((RelativePointer<MethodDesc*> *)ALIGN_UP(pAllocData + cbCountAndSlots.Value(), sizeof(RelativePointer <MethodDesc*>)));
+
+ // Store the count in the first entry
+- *pdwSlots = size;
++ *pdwSlots.GetValue() = size;
+ }
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+-void MethodImpl::SetData(DWORD* slots, MethodDesc** md)
++void MethodImpl::SetData(DWORD* slots, RelativePointer<MethodDesc*>* md)
+ {
+ CONTRACTL {
+ NOTHROW;
+ GC_NOTRIGGER;
+ PRECONDITION(CheckPointer(this));
+- PRECONDITION(CheckPointer(pdwSlots));
++ PRECONDITION(!pdwSlots.IsNull());
+ } CONTRACTL_END;
+
+- DWORD dwSize = *pdwSlots;
+- memcpy(&(pdwSlots[1]), slots, dwSize*sizeof(DWORD));
+- memcpy(pImplementedMD, md, dwSize*sizeof(MethodDesc*));
++ DWORD *pdwSize = pdwSlots.GetValue();
++ DWORD dwSize = *pdwSize;
++ memcpy(&(pdwSize[1]), slots, dwSize*sizeof(DWORD));
++
++ RelativePointer<MethodDesc *> *pImplMD = pImplementedMD.GetValue();
++
++ for (uint32_t i = 0; i < dwSize; ++i)
++ {
++ pImplMD[i].SetValue(md[i].GetValue());
++ }
+ }
+
+ #ifdef FEATURE_NATIVE_IMAGE_GENERATION
+@@ -194,10 +205,10 @@ void MethodImpl::Save(DataImage *image)
+ DWORD size = GetSize();
+ _ASSERTE(size > 0);
+
+- image->StoreStructure(pdwSlots, (size+1)*sizeof(DWORD),
++ image->StoreStructure(pdwSlots.GetValue(), (size+1)*sizeof(DWORD),
+ DataImage::ITEM_METHOD_DESC_COLD,
+ sizeof(DWORD));
+- image->StoreStructure(pImplementedMD, size*sizeof(MethodDesc*),
++ image->StoreStructure(pImplementedMD.GetValue(), size*sizeof(RelativePointer<MethodDesc*>),
+ DataImage::ITEM_METHOD_DESC_COLD,
+ sizeof(MethodDesc*));
+ }
+@@ -214,21 +225,22 @@ void MethodImpl::Fixup(DataImage *image, PVOID p, SSIZE_T offset)
+ // <TODO> Why not use FixupMethodDescPointer? </TODO>
+ // <TODO> Does it matter if the MethodDesc needs a restore? </TODO>
+
+- MethodDesc * pMD = pImplementedMD[iMD];
++ RelativePointer<MethodDesc *> *pRelPtr = pImplementedMD.GetValue();
++ MethodDesc * pMD = pRelPtr[iMD].GetValueMaybeNull();
+
+ if (image->CanEagerBindToMethodDesc(pMD) &&
+ image->CanHardBindToZapModule(pMD->GetLoaderModule()))
+ {
+- image->FixupPointerField(pImplementedMD, iMD * sizeof(MethodDesc *));
++ image->FixupRelativePointerField(pImplementedMD.GetValue(), iMD * sizeof(RelativePointer<MethodDesc *>));
+ }
+ else
+ {
+- image->ZeroPointerField(pImplementedMD, iMD * sizeof(MethodDesc *));
++ image->ZeroPointerField(pImplementedMD.GetValue(), iMD * sizeof(RelativePointer<MethodDesc *>));
+ }
+ }
+
+- image->FixupPointerField(p, offset + offsetof(MethodImpl, pdwSlots));
+- image->FixupPointerField(p, offset + offsetof(MethodImpl, pImplementedMD));
++ image->FixupRelativePointerField(p, offset + offsetof(MethodImpl, pdwSlots));
++ image->FixupRelativePointerField(p, offset + offsetof(MethodImpl, pImplementedMD));
+ }
+
+ #endif // FEATURE_NATIVE_IMAGE_GENERATION
+@@ -247,19 +259,20 @@ MethodImpl::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ // 'this' memory should already be enumerated as
+ // part of the base MethodDesc.
+
+- if (pdwSlots.IsValid() && GetSize())
++ if (GetSlotsRaw().IsValid() && GetSize())
+ {
+ ULONG32 numSlots = GetSize();
+- DacEnumMemoryRegion(dac_cast<TADDR>(pdwSlots),
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetSlotsRawNonNull()),
+ (numSlots + 1) * sizeof(DWORD));
+
+- if (pImplementedMD.IsValid())
++ if (GetImpMDs().IsValid())
+ {
+- DacEnumMemoryRegion(dac_cast<TADDR>(pImplementedMD),
+- numSlots * sizeof(PTR_MethodDesc));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetImpMDsNonNull()),
++ numSlots * sizeof(RelativePointer<MethodDesc *>));
+ for (DWORD i = 0; i < numSlots; i++)
+ {
+- PTR_MethodDesc methodDesc = pImplementedMD[i];
++ DPTR(RelativePointer<PTR_MethodDesc>) pRelPtr = GetImpMDsNonNull();
++ PTR_MethodDesc methodDesc = pRelPtr[i].GetValueMaybeNull();
+ if (methodDesc.IsValid())
+ {
+ methodDesc->EnumMemoryRegions(flags);
+diff --git a/src/vm/methodimpl.h b/src/vm/methodimpl.h
+index 0646367..453f4cc 100644
+--- a/src/vm/methodimpl.h
++++ b/src/vm/methodimpl.h
+@@ -24,8 +24,8 @@ class MethodImpl
+ friend class NativeImageDumper;
+ #endif
+
+- PTR_DWORD pdwSlots; // Maintains the slots in sorted order, the first entry is the size
+- DPTR(PTR_MethodDesc) pImplementedMD;
++ RelativePointer<PTR_DWORD> pdwSlots; // Maintains the slots in sorted order, the first entry is the size
++ RelativePointer<DPTR( RelativePointer<PTR_MethodDesc> )> pImplementedMD;
+
+ public:
+
+@@ -49,18 +49,19 @@ public:
+ inline MethodDesc *GetMethodDesc()
+ { WRAPPER_NO_CONTRACT; return m_pImpl->FindMethodDesc(GetSlot(), (PTR_MethodDesc) m_pMD); }
+ };
++#endif // !DACCESS_COMPILE
+
+- ///////////////////////////////////////////////////////////////////////////////////////
+- inline MethodDesc** GetImplementedMDs()
++ inline DPTR(RelativePointer<PTR_MethodDesc>) GetImpMDs()
+ {
+- CONTRACTL {
+- NOTHROW;
+- GC_NOTRIGGER;
+- PRECONDITION(CheckPointer(this));
+- } CONTRACTL_END;
+- return pImplementedMD;
++ LIMITED_METHOD_DAC_CONTRACT;
++ return RelativePointer<DPTR(RelativePointer<PTR_MethodDesc>)>::GetValueMaybeNullAtPtr(PTR_HOST_MEMBER_TADDR(MethodImpl, this, pImplementedMD));
++ }
++
++ inline DPTR(RelativePointer<PTR_MethodDesc>) GetImpMDsNonNull()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return RelativePointer<DPTR(RelativePointer<PTR_MethodDesc>)>::GetValueAtPtr(PTR_HOST_MEMBER_TADDR(MethodImpl, this, pImplementedMD));
+ }
+-#endif // !DACCESS_COMPILE
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+ inline DWORD GetSize()
+@@ -71,10 +72,10 @@ public:
+ PRECONDITION(CheckPointer(this));
+ } CONTRACTL_END;
+
+- if(pdwSlots == NULL)
++ if(pdwSlots.IsNull())
+ return 0;
+ else
+- return *pdwSlots;
++ return *GetSlotsRawNonNull();
+ }
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+@@ -87,10 +88,22 @@ public:
+ SUPPORTS_DAC;
+ } CONTRACTL_END;
+
+- if(pdwSlots == NULL)
++ if(pdwSlots.IsNull())
+ return NULL;
+ else
+- return pdwSlots + 1;
++ return GetSlotsRawNonNull() + 1;
++ }
++
++ inline PTR_DWORD GetSlotsRaw()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return RelativePointer<PTR_DWORD>::GetValueMaybeNullAtPtr(PTR_HOST_MEMBER_TADDR(MethodImpl, this, pdwSlots));
++ }
++
++ inline PTR_DWORD GetSlotsRawNonNull()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return RelativePointer<PTR_DWORD>::GetValueAtPtr(PTR_HOST_MEMBER_TADDR(MethodImpl, this, pdwSlots));
+ }
+
+ #ifndef DACCESS_COMPILE
+@@ -99,7 +112,7 @@ public:
+ void SetSize(LoaderHeap *pHeap, AllocMemTracker *pamTracker, DWORD size);
+
+ ///////////////////////////////////////////////////////////////////////////////////////
+- void SetData(DWORD* slots, MethodDesc** md);
++ void SetData(DWORD* slots, RelativePointer<MethodDesc*> * md);
+
+ #endif // !DACCESS_COMPILE
+
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index 503c13a..df31c4a 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -6125,8 +6125,8 @@ MethodTableBuilder::InitMethodDesc(
+ AllocateFromHighFrequencyHeap(S_SIZE_T(sizeof(NDirectWriteableData)));
+
+ #ifdef HAS_NDIRECT_IMPORT_PRECODE
+- pNewNMD->ndirect.m_pImportThunkGlue = Precode::Allocate(PRECODE_NDIRECT_IMPORT, pNewMD,
+- GetLoaderAllocator(), GetMemTracker())->AsNDirectImportPrecode();
++ pNewNMD->ndirect.m_pImportThunkGlue.SetValue(Precode::Allocate(PRECODE_NDIRECT_IMPORT, pNewMD,
++ GetLoaderAllocator(), GetMemTracker())->AsNDirectImportPrecode());
+ #else // !HAS_NDIRECT_IMPORT_PRECODE
+ pNewNMD->GetNDirectImportThunkGlue()->Init(pNewNMD);
+ #endif // !HAS_NDIRECT_IMPORT_PRECODE
+@@ -6371,7 +6371,7 @@ MethodTableBuilder::PlaceMethodImpls()
+ // Allocate some temporary storage. The number of overrides for a single method impl
+ // cannot be greater then the number of vtable slots.
+ DWORD * slots = new (&GetThread()->m_MarshalAlloc) DWORD[bmtVT->cVirtualSlots];
+- MethodDesc ** replaced = new (&GetThread()->m_MarshalAlloc) MethodDesc*[bmtVT->cVirtualSlots];
++ RelativePointer<MethodDesc *> * replaced = new (&GetThread()->m_MarshalAlloc) RelativePointer<MethodDesc*>[bmtVT->cVirtualSlots];
+
+ DWORD iEntry = 0;
+ bmtMDMethod * pCurImplMethod = bmtMethodImpl->GetImplementationMethod(iEntry);
+@@ -6458,7 +6458,7 @@ MethodTableBuilder::WriteMethodImplData(
+ bmtMDMethod * pImplMethod,
+ DWORD cSlots,
+ DWORD * rgSlots,
+- MethodDesc ** rgDeclMD)
++ RelativePointer<MethodDesc *> * rgDeclMD)
+ {
+ STANDARD_VM_CONTRACT;
+
+@@ -6488,9 +6488,9 @@ MethodTableBuilder::WriteMethodImplData(
+ {
+ if (rgSlots[j] < rgSlots[i])
+ {
+- MethodDesc * mTmp = rgDeclMD[i];
+- rgDeclMD[i] = rgDeclMD[j];
+- rgDeclMD[j] = mTmp;
++ MethodDesc * mTmp = rgDeclMD[i].GetValue();
++ rgDeclMD[i].SetValue(rgDeclMD[j].GetValue());
++ rgDeclMD[j].SetValue(mTmp);
+
+ DWORD sTmp = rgSlots[i];
+ rgSlots[i] = rgSlots[j];
+@@ -6512,7 +6512,7 @@ MethodTableBuilder::PlaceLocalDeclaration(
+ bmtMDMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD * slots,
+- MethodDesc ** replaced,
++ RelativePointer<MethodDesc *> * replaced,
+ DWORD * pSlotIndex)
+ {
+ CONTRACTL
+@@ -6569,7 +6569,7 @@ MethodTableBuilder::PlaceLocalDeclaration(
+
+ // We implement this slot, record it
+ slots[*pSlotIndex] = pDecl->GetSlotIndex();
+- replaced[*pSlotIndex] = pDecl->GetMethodDesc();
++ replaced[*pSlotIndex].SetValue(pDecl->GetMethodDesc());
+
+ // increment the counter
+ (*pSlotIndex)++;
+@@ -6580,7 +6580,7 @@ VOID MethodTableBuilder::PlaceInterfaceDeclaration(
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+- MethodDesc** replaced,
++ RelativePointer<MethodDesc *> * replaced,
+ DWORD* pSlotIndex)
+ {
+ CONTRACTL {
+@@ -6685,7 +6685,7 @@ MethodTableBuilder::PlaceParentDeclaration(
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD * slots,
+- MethodDesc ** replaced,
++ RelativePointer<MethodDesc *> * replaced,
+ DWORD * pSlotIndex)
+ {
+ CONTRACTL {
+@@ -6730,7 +6730,7 @@ MethodTableBuilder::PlaceParentDeclaration(
+
+ // We implement this slot, record it
+ slots[*pSlotIndex] = pDeclMD->GetSlot();
+- replaced[*pSlotIndex] = pDeclMD;
++ replaced[*pSlotIndex].SetValue(pDeclMD);
+
+ // increment the counter
+ (*pSlotIndex)++;
+diff --git a/src/vm/methodtablebuilder.h b/src/vm/methodtablebuilder.h
+index 2aa3683..6c82e54 100644
+--- a/src/vm/methodtablebuilder.h
++++ b/src/vm/methodtablebuilder.h
+@@ -2749,7 +2749,7 @@ private:
+ bmtMDMethod * pImplMethod,
+ DWORD cSlots,
+ DWORD * rgSlots,
+- MethodDesc ** rgDeclMD);
++ RelativePointer<MethodDesc *> * rgDeclMD);
+
+ // --------------------------------------------------------------------------------------------
+ // Places a methodImpl pair where the decl is declared by the type being built.
+@@ -2758,7 +2758,7 @@ private:
+ bmtMDMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+- MethodDesc** replaced,
++ RelativePointer<MethodDesc *> * replaced,
+ DWORD* pSlotIndex);
+
+ // --------------------------------------------------------------------------------------------
+@@ -2768,7 +2768,7 @@ private:
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+- MethodDesc** replaced,
++ RelativePointer<MethodDesc *> * replaced,
+ DWORD* pSlotIndex);
+
+ // --------------------------------------------------------------------------------------------
+@@ -2778,7 +2778,7 @@ private:
+ bmtRTMethod * pDecl,
+ bmtMDMethod * pImpl,
+ DWORD* slots,
+- MethodDesc** replaced,
++ RelativePointer<MethodDesc *> * replaced,
+ DWORD* pSlotIndex);
+
+ // --------------------------------------------------------------------------------------------
+--
+2.7.4
+
diff --git a/packaging/0007-FIX-fix-No.1-missing-GetImplementedMDs.patch b/packaging/0007-FIX-fix-No.1-missing-GetImplementedMDs.patch
new file mode 100644
index 0000000000..8ea8b47380
--- /dev/null
+++ b/packaging/0007-FIX-fix-No.1-missing-GetImplementedMDs.patch
@@ -0,0 +1,59 @@
+From 959e95869f6897afd5c3d9ff25c07318170bfaec Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Wed, 11 Apr 2018 16:10:58 +0300
+Subject: [PATCH 07/32] FIX: fix No.1, missing GetImplementedMDs()
+
+---
+ src/vm/methodtablebuilder.cpp | 6 ++++--
+ src/vm/securitymeta.cpp | 11 +++++++++--
+ 2 files changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index df31c4a..3162f7c 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -11909,8 +11909,10 @@ void MethodTableBuilder::VerifyInheritanceSecurity()
+ MethodImpl *pMethodImpl = pMD->GetMethodImpl();
+ for (DWORD iCurImpl = 0; iCurImpl < pMethodImpl->GetSize(); iCurImpl++)
+ {
+- MethodDesc *pDeclMD = pMethodImpl->GetImplementedMDs()[iCurImpl];
+- _ASSERTE(pDeclMD != NULL);
++ RelativePointer<MethodDesc *> *pRelPtrForSlot = pMethodImpl->GetImpMDsNonNull();
++ TADDR base = dac_cast<TADDR>(pRelPtrForSlot) + iCurImpl * sizeof(RelativePointer<MethodDesc *>);
++ MethodDesc *pDeclMD = RelativePointer<MethodDesc *>::GetValueAtPtr(base);
++
+ // We deal with interfaces below, so don't duplicate work
+ if (!pDeclMD->IsInterface())
+ {
+diff --git a/src/vm/securitymeta.cpp b/src/vm/securitymeta.cpp
+index 1374d9f..3f06f05 100644
+--- a/src/vm/securitymeta.cpp
++++ b/src/vm/securitymeta.cpp
+@@ -771,7 +771,10 @@ MethodDesc *MethodSecurityDescriptor::MethodImplementationIterator::Current()
+ _ASSERTE(m_fMethodImplIterationBegun);
+ _ASSERTE(m_pMD->IsMethodImpl());
+ _ASSERTE(m_iMethodImplIndex < m_pMD->GetMethodImpl()->GetSize());
+- return m_pMD->GetMethodImpl()->GetImplementedMDs()[m_iMethodImplIndex];
++
++ RelativePointer<MethodDesc *> *pRelPtrForSlot = m_pMD->GetMethodImpl()->GetImpMDsNonNull();
++ TADDR base = dac_cast<TADDR>(pRelPtrForSlot) + m_iMethodImplIndex * sizeof(RelativePointer<MethodDesc *>);
++ return RelativePointer<MethodDesc *>::GetValueAtPtr(base);
+ }
+ }
+
+@@ -856,7 +859,11 @@ void MethodSecurityDescriptor::MethodImplementationIterator::Next()
+ if (m_iMethodImplIndex < pMethodImpl->GetSize())
+ {
+ // Skip over the interface MethodImpls since we already processed those
+- fFoundImpl = !pMethodImpl->GetImplementedMDs()[m_iMethodImplIndex]->IsInterface();
++ RelativePointer<MethodDesc *> *pRelPtrForSlot = pMethodImpl->GetImpMDsNonNull();
++ TADDR base = dac_cast<TADDR>(pRelPtrForSlot) + m_iMethodImplIndex * sizeof(RelativePointer<MethodDesc *>);
++ MethodDesc *pDeclMD = RelativePointer<MethodDesc *>::GetValueAtPtr(base);
++
++ fFoundImpl = !pDeclMD->IsInterface();
+ }
+ }
+ }
+--
+2.7.4
+
diff --git a/packaging/0008-Fix-issues-with-RelativePointer-instead-of-RelativeF.patch b/packaging/0008-Fix-issues-with-RelativePointer-instead-of-RelativeF.patch
new file mode 100644
index 0000000000..1cd2634d61
--- /dev/null
+++ b/packaging/0008-Fix-issues-with-RelativePointer-instead-of-RelativeF.patch
@@ -0,0 +1,26 @@
+From de0e52102e60a0be0b481c3c92e8fad71dbf8941 Mon Sep 17 00:00:00 2001
+From: gbalykov <g.balykov@samsung.com>
+Date: Wed, 31 May 2017 18:39:43 +0300
+Subject: [PATCH 08/32] Fix issues with RelativePointer instead of
+ RelativeFixupPointer (#12004)
+
+---
+ src/vm/method.hpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/vm/method.hpp b/src/vm/method.hpp
+index 41695df..ae65f30 100644
+--- a/src/vm/method.hpp
++++ b/src/vm/method.hpp
+@@ -3313,7 +3313,7 @@ public:
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ _ASSERTE(IMD_IsWrapperStubWithInstantiations());
+- return RelativePointer<PTR_MethodDesc>::GetValueAtPtr(PTR_HOST_MEMBER_TADDR(InstantiatedMethodDesc, this, m_pWrappedMethodDesc));
++ return RelativeFixupPointer<PTR_MethodDesc>::GetValueAtPtr(PTR_HOST_MEMBER_TADDR(InstantiatedMethodDesc, this, m_pWrappedMethodDesc));
+ }
+
+ #ifndef DACCESS_COMPILE
+--
+2.7.4
+
diff --git a/packaging/0009-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch b/packaging/0009-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch
new file mode 100644
index 0000000000..d348274583
--- /dev/null
+++ b/packaging/0009-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch
@@ -0,0 +1,595 @@
+From 37436d59134e8beea0fcb4a7bb2b9c3e1e5e9714 Mon Sep 17 00:00:00 2001
+From: gbalykov <g.balykov@samsung.com>
+Date: Sat, 3 Jun 2017 06:31:28 +0300
+Subject: [PATCH 09/32] Remove relocations from SECTION_MethodDesc for ngened
+ images for fields accessed from jit code for Linux ARM (#11963)
+
+---
+ .../superpmi/superpmi-shared/methodcontext.cpp | 6 ++
+ .../superpmi/superpmi-shared/methodcontext.h | 1 +
+ src/debug/daccess/nidump.cpp | 4 +-
+ src/inc/corinfo.h | 7 ++
+ src/inc/fixuppointer.h | 99 +++++++++++++++++++++-
+ src/jit/importer.cpp | 14 +++
+ src/vm/dllimport.cpp | 12 +++
+ src/vm/genmeth.cpp | 10 +--
+ src/vm/jitinterface.cpp | 12 +++
+ src/vm/method.cpp | 18 +++-
+ src/vm/method.hpp | 32 +++++--
+ src/vm/methodtablebuilder.cpp | 4 +-
+ src/vm/prestub.cpp | 13 +++
+ 13 files changed, 211 insertions(+), 21 deletions(-)
+
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+index e60ef83..4c5fb61 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+@@ -1573,6 +1573,7 @@ void MethodContext::recGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ value.stubLookup.runtimeLookup.indirections = (DWORD)pResult->stubLookup.runtimeLookup.indirections;
+ value.stubLookup.runtimeLookup.testForNull = (DWORD)pResult->stubLookup.runtimeLookup.testForNull;
+ value.stubLookup.runtimeLookup.testForFixup = (DWORD)pResult->stubLookup.runtimeLookup.testForFixup;
++ value.stubLookup.runtimeLookup.indirectFirstOffset = (DWORD)pResult->stubLookup.runtimeLookup.indirectFirstOffset;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.stubLookup.runtimeLookup.offsets[i] = (DWORDLONG)pResult->stubLookup.runtimeLookup.offsets[i];
+ }
+@@ -1583,6 +1584,7 @@ void MethodContext::recGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ value.stubLookup.runtimeLookup.indirections = (DWORD)0;
+ value.stubLookup.runtimeLookup.testForNull = (DWORD)0;
+ value.stubLookup.runtimeLookup.testForFixup = (DWORD)0;
++ value.stubLookup.runtimeLookup.indirectFirstOffset = (DWORD)0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.stubLookup.runtimeLookup.offsets[i] = (DWORDLONG)0;
+
+@@ -1761,6 +1763,7 @@ void MethodContext::repGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ pResult->stubLookup.runtimeLookup.indirections = (WORD)value.stubLookup.runtimeLookup.indirections;
+ pResult->stubLookup.runtimeLookup.testForNull = value.stubLookup.runtimeLookup.testForNull != 0;
+ pResult->stubLookup.runtimeLookup.testForFixup = value.stubLookup.runtimeLookup.testForFixup != 0;
++ pResult->stubLookup.runtimeLookup.indirectFirstOffset = value.stubLookup.runtimeLookup.indirectFirstOffset != 0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ pResult->stubLookup.runtimeLookup.offsets[i] = (SIZE_T)value.stubLookup.runtimeLookup.offsets[i];
+ }
+@@ -3218,6 +3221,7 @@ void MethodContext::recEmbedGenericHandle(CORINFO_RESOLVED_TOKEN* pResolve
+ value.lookup.runtimeLookup.indirections = (DWORD)pResult->lookup.runtimeLookup.indirections;
+ value.lookup.runtimeLookup.testForNull = (DWORD)pResult->lookup.runtimeLookup.testForNull;
+ value.lookup.runtimeLookup.testForFixup = (DWORD)pResult->lookup.runtimeLookup.testForFixup;
++ value.lookup.runtimeLookup.indirectFirstOffset = (DWORD)pResult->lookup.runtimeLookup.indirectFirstOffset;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.lookup.runtimeLookup.offsets[i] = (DWORDLONG)pResult->lookup.runtimeLookup.offsets[i];
+ }
+@@ -3228,6 +3232,7 @@ void MethodContext::recEmbedGenericHandle(CORINFO_RESOLVED_TOKEN* pResolve
+ value.lookup.runtimeLookup.indirections = (DWORD)0;
+ value.lookup.runtimeLookup.testForNull = (DWORD)0;
+ value.lookup.runtimeLookup.testForFixup = (DWORD)0;
++ value.lookup.runtimeLookup.indirectFirstOffset = (DWORD)0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.lookup.runtimeLookup.offsets[i] = (DWORDLONG)0;
+ // copy the constLookup view of the union
+@@ -3305,6 +3310,7 @@ void MethodContext::repEmbedGenericHandle(CORINFO_RESOLVED_TOKEN* pResolve
+ pResult->lookup.runtimeLookup.indirections = (WORD)value.lookup.runtimeLookup.indirections;
+ pResult->lookup.runtimeLookup.testForNull = value.lookup.runtimeLookup.testForNull != 0;
+ pResult->lookup.runtimeLookup.testForFixup = value.lookup.runtimeLookup.testForFixup != 0;
++ pResult->lookup.runtimeLookup.indirectFirstOffset = value.lookup.runtimeLookup.indirectFirstOffset != 0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ pResult->lookup.runtimeLookup.offsets[i] = (size_t)value.lookup.runtimeLookup.offsets[i];
+ }
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+index f7c0e16..4887522 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+@@ -240,6 +240,7 @@ public:
+ DWORD testForNull;
+ DWORD testForFixup;
+ DWORDLONG offsets[CORINFO_MAXINDIRECTIONS];
++ DWORD indirectFirstOffset;
+ };
+ struct Agnostic_CORINFO_CONST_LOOKUP
+ {
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index 5e8302f..d1e69f6 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -7947,7 +7947,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ if( !CHECK_OPT(METHODDESCS) )
+ CoverageReadString( dac_cast<TADDR>(ndmd->GetLibNameRaw()) );
+
+- PTR_NDirectWriteableData wnd( nd->m_pWriteableData );
++ PTR_NDirectWriteableData wnd( ndmd->GetWriteableData() );
+ DisplayStartStructureWithOffset( m_pWriteableData,
+ DPtrToPreferredAddr(wnd),
+ sizeof(*wnd),
+@@ -8103,7 +8103,7 @@ void NativeImageDumper::DumpMethodDesc( PTR_MethodDesc md, PTR_Module module )
+ }
+ //now handle the contents of the m_pMethInst/m_pPerInstInfo union.
+ unsigned numSlots = imd->m_wNumGenericArgs;
+- PTR_Dictionary inst(imd->m_pPerInstInfo);
++ PTR_Dictionary inst(imd->IMD_GetMethodDictionary());
+ unsigned dictSize;
+ if( kind == InstantiatedMethodDesc::SharedMethodInstantiation )
+ {
+diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
+index 2495de2..f6a136c 100644
+--- a/src/inc/corinfo.h
++++ b/src/inc/corinfo.h
+@@ -1323,6 +1323,13 @@ struct CORINFO_RUNTIME_LOOKUP
+ bool testForFixup;
+
+ SIZE_T offsets[CORINFO_MAXINDIRECTIONS];
++
++ // If set, first offset is indirect.
++ // 0 means that value stored at first offset (offsets[0]) from pointer is next pointer, to which the next offset
++ // (offsets[1]) is added and so on.
++ // 1 means that value stored at first offset (offsets[0]) from pointer is offset1, and the next pointer is
++ // stored at pointer+offsets[0]+offset1.
++ bool indirectFirstOffset;
+ } ;
+
+ // Result of calling embedGenericHandle
+diff --git a/src/inc/fixuppointer.h b/src/inc/fixuppointer.h
+index 549023a..38ae348 100644
+--- a/src/inc/fixuppointer.h
++++ b/src/inc/fixuppointer.h
+@@ -30,6 +30,10 @@ template<typename PTR_TYPE>
+ class RelativePointer
+ {
+ public:
++
++ static constexpr bool isRelative = true;
++ typedef PTR_TYPE type;
++
+ #ifndef DACCESS_COMPILE
+ RelativePointer()
+ {
+@@ -173,6 +177,10 @@ template<typename PTR_TYPE>
+ class FixupPointer
+ {
+ public:
++
++ static constexpr bool isRelative = false;
++ typedef PTR_TYPE type;
++
+ // Returns whether the encoded pointer is NULL.
+ BOOL IsNull() const
+ {
+@@ -237,6 +245,10 @@ template<typename PTR_TYPE>
+ class RelativeFixupPointer
+ {
+ public:
++
++ static constexpr bool isRelative = true;
++ typedef PTR_TYPE type;
++
+ // Implicit copy/move is not allowed
+ RelativeFixupPointer<PTR_TYPE>(const RelativeFixupPointer<PTR_TYPE> &) =delete;
+ RelativeFixupPointer<PTR_TYPE>(RelativeFixupPointer<PTR_TYPE> &&) =delete;
+@@ -384,7 +396,7 @@ private:
+ // Fixup used for RelativePointer
+ #define IMAGE_REL_BASED_RelativePointer IMAGE_REL_BASED_RELPTR
+
+-#else // FEATURE_PREJIT
++#endif // FEATURE_PREJIT
+
+ //----------------------------------------------------------------------------
+ // PlainPointer is simple pointer wrapper to support compilation without indirections
+@@ -393,6 +405,10 @@ template<typename PTR_TYPE>
+ class PlainPointer
+ {
+ public:
++
++ static constexpr bool isRelative = false;
++ typedef PTR_TYPE type;
++
+ // Returns whether the encoded pointer is NULL.
+ BOOL IsNull() const
+ {
+@@ -499,11 +515,13 @@ private:
+ TADDR m_ptr;
+ };
+
++#ifndef FEATURE_PREJIT
++
+ #define FixupPointer PlainPointer
+ #define RelativePointer PlainPointer
+ #define RelativeFixupPointer PlainPointer
+
+-#endif // FEATURE_PREJIT
++#endif // !FEATURE_PREJIT
+
+ //----------------------------------------------------------------------------
+ // RelativePointer32 is pointer encoded as relative 32-bit offset. It is used
+@@ -513,6 +531,10 @@ template<typename PTR_TYPE>
+ class RelativePointer32
+ {
+ public:
++
++ static constexpr bool isRelative = true;
++ typedef PTR_TYPE type;
++
+ // Returns whether the encoded pointer is NULL.
+ BOOL IsNull() const
+ {
+@@ -581,4 +603,77 @@ private:
+ INT32 m_delta;
+ };
+
++template<bool isMaybeNull, typename T, typename PT>
++typename PT::type
++ReadPointer(const T *base, const PT T::* pPointerFieldMember)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ uintptr_t offset = (uintptr_t) &(base->*pPointerFieldMember) - (uintptr_t) base;
++
++ if (isMaybeNull)
++ {
++ return PT::GetValueMaybeNullAtPtr(dac_cast<TADDR>(base) + offset);
++ }
++ else
++ {
++ return PT::GetValueAtPtr(dac_cast<TADDR>(base) + offset);
++ }
++}
++
++template<typename T, typename PT>
++typename PT::type
++ReadPointerMaybeNull(const T *base, const PT T::* pPointerFieldMember)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointer<true>(base, pPointerFieldMember);
++}
++
++template<typename T, typename PT>
++typename PT::type
++ReadPointer(const T *base, const PT T::* pPointerFieldMember)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointer<false>(base, pPointerFieldMember);
++}
++
++template<bool isMaybeNull, typename T, typename C, typename PT>
++typename PT::type
++ReadPointer(const T *base, const C T::* pFirstPointerFieldMember, const PT C::* pSecondPointerFieldMember)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ const PT *ptr = &(base->*pFirstPointerFieldMember.*pSecondPointerFieldMember);
++ uintptr_t offset = (uintptr_t) ptr - (uintptr_t) base;
++
++ if (isMaybeNull)
++ {
++ return PT::GetValueMaybeNullAtPtr(dac_cast<TADDR>(base) + offset);
++ }
++ else
++ {
++ return PT::GetValueAtPtr(dac_cast<TADDR>(base) + offset);
++ }
++}
++
++template<typename T, typename C, typename PT>
++typename PT::type
++ReadPointerMaybeNull(const T *base, const C T::* pFirstPointerFieldMember, const PT C::* pSecondPointerFieldMember)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointer<true>(base, pFirstPointerFieldMember, pSecondPointerFieldMember);
++}
++
++template<typename T, typename C, typename PT>
++typename PT::type
++ReadPointer(const T *base, const C T::* pFirstPointerFieldMember, const PT C::* pSecondPointerFieldMember)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointer<false>(base, pFirstPointerFieldMember, pSecondPointerFieldMember);
++}
++
+ #endif //_FIXUPPOINTER_H
+diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
+index 2d50741..80c0b75 100644
+--- a/src/jit/importer.cpp
++++ b/src/jit/importer.cpp
+@@ -1975,15 +1975,29 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedTok
+ nullptr DEBUGARG("impRuntimeLookup slot"));
+ }
+
++ GenTreePtr indOffTree = nullptr;
++
+ // Applied repeated indirections
+ for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
+ {
++ if (i == 1 && pRuntimeLookup->indirectFirstOffset)
++ {
++ indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
++ nullptr DEBUGARG("impRuntimeLookup indirectFirstOffset"));
++ }
++
+ if (i != 0)
+ {
+ slotPtrTree = gtNewOperNode(GT_IND, TYP_I_IMPL, slotPtrTree);
+ slotPtrTree->gtFlags |= GTF_IND_NONFAULTING;
+ slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
+ }
++
++ if (i == 1 && pRuntimeLookup->indirectFirstOffset)
++ {
++ slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
++ }
++
+ if (pRuntimeLookup->offsets[i] != 0)
+ {
+ slotPtrTree =
+diff --git a/src/vm/dllimport.cpp b/src/vm/dllimport.cpp
+index cf546cd..3ec8b5f 100644
+--- a/src/vm/dllimport.cpp
++++ b/src/vm/dllimport.cpp
+@@ -2294,7 +2294,19 @@ void NDirectStubLinker::DoNDirect(ILCodeStream *pcsEmit, DWORD dwStubFlags, Meth
+ //pcsEmit->EmitCALL(METHOD__STUBHELPERS__GET_NDIRECT_TARGET, 1, 1);
+ pcsEmit->EmitLDC(offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
+ pcsEmit->EmitADD();
++
++ if (decltype(NDirectMethodDesc::ndirect.m_pWriteableData)::isRelative)
++ {
++ pcsEmit->EmitDUP();
++ }
++
+ pcsEmit->EmitLDIND_I();
++
++ if (decltype(NDirectMethodDesc::ndirect.m_pWriteableData)::isRelative)
++ {
++ pcsEmit->EmitADD();
++ }
++
+ pcsEmit->EmitLDIND_I();
+ }
+ }
+diff --git a/src/vm/genmeth.cpp b/src/vm/genmeth.cpp
+index d5b435b..dc55221 100644
+--- a/src/vm/genmeth.cpp
++++ b/src/vm/genmeth.cpp
+@@ -1518,9 +1518,9 @@ void InstantiatedMethodDesc::SetupGenericMethodDefinition(IMDInternalImport *pIM
+ S_SIZE_T dwAllocSize = S_SIZE_T(numTyPars) * S_SIZE_T(sizeof(TypeHandle));
+
+ // the memory allocated for m_pMethInst will be freed if the declaring type fails to load
+- m_pPerInstInfo = (Dictionary *) pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(dwAllocSize));
++ m_pPerInstInfo.SetValue((Dictionary *) pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(dwAllocSize)));
+
+- TypeHandle * pInstDest = (TypeHandle *)m_pPerInstInfo;
++ TypeHandle * pInstDest = (TypeHandle *) IMD_GetMethodDictionaryNonNull();
+ for(unsigned int i = 0; i < numTyPars; i++)
+ {
+ hEnumTyPars.EnumNext(&tkTyPar);
+@@ -1553,7 +1553,7 @@ void InstantiatedMethodDesc::SetupWrapperStubWithInstantiations(MethodDesc* wrap
+
+ m_pWrappedMethodDesc.SetValue(wrappedMD);
+ m_wFlags2 = WrapperStubWithInstantiations | (m_wFlags2 & ~KindMask);
+- m_pPerInstInfo = (Dictionary*)pInst;
++ m_pPerInstInfo.SetValueMaybeNull((Dictionary*)pInst);
+
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ m_wNumGenericArgs = static_cast<WORD>(numGenericArgs);
+@@ -1571,7 +1571,7 @@ void InstantiatedMethodDesc::SetupSharedMethodInstantiation(DWORD numGenericArgs
+ _ASSERTE(numGenericArgs != 0);
+ // Initially the dictionary layout is empty
+ m_wFlags2 = SharedMethodInstantiation | (m_wFlags2 & ~KindMask);
+- m_pPerInstInfo = (Dictionary *)pPerInstInfo;
++ m_pPerInstInfo.SetValueMaybeNull((Dictionary *)pPerInstInfo);
+
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ m_wNumGenericArgs = static_cast<WORD>(numGenericArgs);
+@@ -1589,7 +1589,7 @@ void InstantiatedMethodDesc::SetupUnsharedMethodInstantiation(DWORD numGenericAr
+
+ // The first field is never used
+ m_wFlags2 = UnsharedMethodInstantiation | (m_wFlags2 & ~KindMask);
+- m_pPerInstInfo = (Dictionary *)pInst;
++ m_pPerInstInfo.SetValueMaybeNull((Dictionary *)pInst);
+
+ _ASSERTE(FitsIn<WORD>(numGenericArgs));
+ m_wNumGenericArgs = static_cast<WORD>(numGenericArgs);
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index f7617ad..08965a7 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -3141,6 +3141,8 @@ void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entr
+ CORINFO_RUNTIME_LOOKUP *pResult = &pResultLookup->runtimeLookup;
+ pResult->signature = NULL;
+
++ pResult->indirectFirstOffset = 0;
++
+ // Unless we decide otherwise, just do the lookup via a helper function
+ pResult->indirections = CORINFO_USEHELPER;
+
+@@ -3264,6 +3266,11 @@ void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entr
+ #endif
+ pResult->offsets[0] = offsetof(InstantiatedMethodDesc, m_pPerInstInfo);
+
++ if (decltype(InstantiatedMethodDesc::m_pPerInstInfo)::isRelative)
++ {
++ pResult->indirectFirstOffset = 1;
++ }
++
+ ULONG data;
+ IfFailThrow(sigptr.GetData(&data));
+ pResult->offsets[1] = sizeof(TypeHandle) * data;
+@@ -3568,6 +3575,11 @@ NoSpecialCase:
+
+ // Indirect through dictionary table pointer in InstantiatedMethodDesc
+ pResult->offsets[0] = offsetof(InstantiatedMethodDesc, m_pPerInstInfo);
++
++ if (decltype(InstantiatedMethodDesc::m_pPerInstInfo)::isRelative)
++ {
++ pResult->indirectFirstOffset = 1;
++ }
+ }
+ }
+
+diff --git a/src/vm/method.cpp b/src/vm/method.cpp
+index c8c1b9f..1407264 100644
+--- a/src/vm/method.cpp
++++ b/src/vm/method.cpp
+@@ -3651,7 +3651,14 @@ MethodDesc::Fixup(
+ }
+ }
+
+- image->FixupPointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
++ if (decltype(InstantiatedMethodDesc::m_pPerInstInfo)::isRelative)
++ {
++ image->FixupRelativePointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
++ }
++ else
++ {
++ image->FixupPointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
++ }
+
+ // Generic methods are dealt with specially to avoid encoding the formal method type parameters
+ if (IsTypicalMethodDefinition())
+@@ -3730,7 +3737,14 @@ MethodDesc::Fixup(
+
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)this;
+
+- image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
++ if (decltype(NDirectMethodDesc::ndirect.m_pWriteableData)::isRelative)
++ {
++ image->FixupRelativePointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
++ }
++ else
++ {
++ image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
++ }
+
+ NDirectWriteableData *pWriteableData = pNMD->GetWriteableData();
+ NDirectImportThunkGlue *pImportThunkGlue = pNMD->GetNDirectImportThunkGlue();
+diff --git a/src/vm/method.hpp b/src/vm/method.hpp
+index ae65f30..9023a1b 100644
+--- a/src/vm/method.hpp
++++ b/src/vm/method.hpp
+@@ -2590,7 +2590,11 @@ public:
+ };
+
+ // The writeable part of the methoddesc.
+- PTR_NDirectWriteableData m_pWriteableData;
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ RelativePointer<PTR_NDirectWriteableData> m_pWriteableData;
++#else
++ PlainPointer<PTR_NDirectWriteableData> m_pWriteableData;
++#endif
+
+ #ifdef HAS_NDIRECT_IMPORT_PRECODE
+ RelativePointer<PTR_NDirectImportThunkGlue> m_pImportThunkGlue;
+@@ -2812,11 +2816,11 @@ public:
+ return (ndirect.m_wFlags & kStdCallWithRetBuf) != 0;
+ }
+
+- NDirectWriteableData* GetWriteableData() const
++ PTR_NDirectWriteableData GetWriteableData() const
+ {
+- LIMITED_METHOD_CONTRACT;
++ LIMITED_METHOD_DAC_CONTRACT;
+
+- return ndirect.m_pWriteableData;
++ return ReadPointer(this, &NDirectMethodDesc::ndirect, &decltype(NDirectMethodDesc::ndirect)::m_pWriteableData);
+ }
+
+ PTR_NDirectImportThunkGlue GetNDirectImportThunkGlue()
+@@ -3221,7 +3225,7 @@ public:
+ if (IMD_IsGenericMethodDefinition())
+ return TRUE;
+ else
+- return m_pPerInstInfo != NULL;
++ return !m_pPerInstInfo.IsNull();
+ }
+
+ // All varieties of InstantiatedMethodDesc's support this method.
+@@ -3229,13 +3233,21 @@ public:
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+- return Instantiation(m_pPerInstInfo->GetInstantiation(), m_wNumGenericArgs);
++ return Instantiation(IMD_GetMethodDictionary()->GetInstantiation(), m_wNumGenericArgs);
+ }
+
+ PTR_Dictionary IMD_GetMethodDictionary()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return m_pPerInstInfo;
++
++ return ReadPointerMaybeNull(this, &InstantiatedMethodDesc::m_pPerInstInfo);
++ }
++
++ PTR_Dictionary IMD_GetMethodDictionaryNonNull()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointer(this, &InstantiatedMethodDesc::m_pPerInstInfo);
+ }
+
+ BOOL IMD_IsGenericMethodDefinition()
+@@ -3394,7 +3406,11 @@ public: // <TODO>make private: JITinterface.cpp accesses through this </TODO>
+ //
+ // For generic method definitions that are not the typical method definition (e.g. C<int>.m<U>)
+ // this field is null; to obtain the instantiation use LoadMethodInstantiation
+- PTR_Dictionary m_pPerInstInfo; //SHARED
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ RelativePointer<PTR_Dictionary> m_pPerInstInfo; //SHARED
++#else
++ PlainPointer<PTR_Dictionary> m_pPerInstInfo; //SHARED
++#endif
+
+ private:
+ WORD m_wFlags2;
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index 3162f7c..fdf4f48 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -6121,8 +6121,8 @@ MethodTableBuilder::InitMethodDesc(
+ NDirectMethodDesc *pNewNMD = (NDirectMethodDesc*)pNewMD;
+
+ // Allocate writeable data
+- pNewNMD->ndirect.m_pWriteableData = (NDirectWriteableData*)
+- AllocateFromHighFrequencyHeap(S_SIZE_T(sizeof(NDirectWriteableData)));
++ pNewNMD->ndirect.m_pWriteableData.SetValue((NDirectWriteableData*)
++ AllocateFromHighFrequencyHeap(S_SIZE_T(sizeof(NDirectWriteableData))));
+
+ #ifdef HAS_NDIRECT_IMPORT_PRECODE
+ pNewNMD->ndirect.m_pImportThunkGlue.SetValue(Precode::Allocate(PRECODE_NDIRECT_IMPORT, pNewMD,
+diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
+index 88bd9de..af3f190 100644
+--- a/src/vm/prestub.cpp
++++ b/src/vm/prestub.cpp
+@@ -2405,6 +2405,9 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
+
+ pResult->testForFixup = pResult->testForNull = false;
+ pResult->signature = NULL;
++
++ pResult->indirectFirstOffset = 0;
++
+ pResult->indirections = CORINFO_USEHELPER;
+
+ DWORD numGenericArgs = 0;
+@@ -2455,6 +2458,11 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
+ pResult->indirections = 2;
+ pResult->offsets[0] = offsetof(InstantiatedMethodDesc, m_pPerInstInfo);
+
++ if (decltype(InstantiatedMethodDesc::m_pPerInstInfo)::isRelative)
++ {
++ pResult->indirectFirstOffset = 1;
++ }
++
+ ULONG data;
+ IfFailThrow(sigptr.GetData(&data));
+ pResult->offsets[1] = sizeof(TypeHandle) * data;
+@@ -2494,6 +2502,11 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
+ // Indirect through dictionary table pointer in InstantiatedMethodDesc
+ pResult->offsets[0] = offsetof(InstantiatedMethodDesc, m_pPerInstInfo);
+
++ if (decltype(InstantiatedMethodDesc::m_pPerInstInfo)::isRelative)
++ {
++ pResult->indirectFirstOffset = 1;
++ }
++
+ *pDictionaryIndexAndSlot |= dictionarySlot;
+ }
+ }
+--
+2.7.4
+
diff --git a/packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch b/packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch
new file mode 100644
index 0000000000..62647ea3ae
--- /dev/null
+++ b/packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch
@@ -0,0 +1,1759 @@
+From 9fa388f63a78596afdbe59ecc792487f96576b6c Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Tue, 13 Jun 2017 16:58:41 +0300
+Subject: [PATCH 10/32] Partially remove relocations for ModuleSection
+ (ZapVirtualSectionType). (#11853)
+
+---
+ src/debug/daccess/nidump.cpp | 26 +++---
+ src/vm/class.cpp | 14 ++--
+ src/vm/class.h | 67 ++++++++++-----
+ src/vm/class.inl | 2 +-
+ src/vm/classhash.cpp | 57 ++++++-------
+ src/vm/comcallablewrapper.cpp | 2 +-
+ src/vm/comdelegate.cpp | 38 ++++-----
+ src/vm/comsynchronizable.cpp | 2 +-
+ src/vm/instmethhash.cpp | 8 +-
+ src/vm/jitinterface.cpp | 4 +-
+ src/vm/methodtablebuilder.cpp | 14 ++--
+ src/vm/ngenhash.h | 114 ++++++++++++++++++++++---
+ src/vm/ngenhash.inl | 189 ++++++++++++++++++++++++------------------
+ src/vm/stubhelpers.cpp | 4 +-
+ src/vm/typedesc.cpp | 18 ++--
+ src/vm/typedesc.h | 7 +-
+ src/vm/typehash.cpp | 39 +++++----
+ src/vm/typehash.h | 16 +++-
+ 18 files changed, 397 insertions(+), 224 deletions(-)
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index d1e69f6..42705a5 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -4492,14 +4492,14 @@ void NativeImageDumper::TraverseNgenHash(DPTR(HASH_CLASS) pTable,
+ }
+
+ DisplayWriteFieldPointer(m_pModule,
+- DPtrToPreferredAddr(pTable->m_pModule),
++ DPtrToPreferredAddr(pTable->GetModule()),
+ HASH_CLASS, MODULE);
+
+ // Dump warm (volatile) entries.
+ DisplayWriteFieldUInt(m_cWarmEntries, pTable->m_cWarmEntries, HASH_CLASS, MODULE);
+ DisplayWriteFieldUInt(m_cWarmBuckets, pTable->m_cWarmBuckets, HASH_CLASS, MODULE);
+ DisplayWriteFieldAddress(m_pWarmBuckets,
+- DPtrToPreferredAddr(pTable->m_pWarmBuckets),
++ DPtrToPreferredAddr(pTable->GetWarmBuckets()),
+ sizeof(HASH_ENTRY_CLASS*) * pTable->m_cWarmBuckets,
+ HASH_CLASS, MODULE);
+
+@@ -4535,11 +4535,11 @@ void NativeImageDumper::TraverseNgenPersistedEntries(DPTR(HASH_CLASS) pTable,
+ DisplayWriteFieldUInt(m_cEntries, pEntries->m_cEntries, typename HASH_CLASS::PersistedEntries, MODULE);
+ DisplayWriteFieldUInt(m_cBuckets, pEntries->m_cBuckets, typename HASH_CLASS::PersistedEntries, MODULE);
+ DisplayWriteFieldAddress(m_pBuckets,
+- DPtrToPreferredAddr(pEntries->m_pBuckets),
+- pEntries->m_cBuckets ? pEntries->m_pBuckets->GetSize(pEntries->m_cBuckets) : 0,
++ DPtrToPreferredAddr(pTable->GetPersistedBuckets(pEntries)),
++ pEntries->m_cBuckets ? pTable->GetPersistedBuckets(pEntries)->GetSize(pEntries->m_cBuckets) : 0,
+ typename HASH_CLASS::PersistedEntries, MODULE);
+ DisplayWriteFieldAddress(m_pEntries,
+- DPtrToPreferredAddr(pEntries->m_pEntries),
++ DPtrToPreferredAddr(pTable->GetPersistedEntries(pEntries)),
+ sizeof(typename HASH_CLASS::PersistedEntry) * pEntries->m_cEntries,
+ typename HASH_CLASS::PersistedEntries, MODULE);
+
+@@ -4551,7 +4551,7 @@ void NativeImageDumper::TraverseNgenPersistedEntries(DPTR(HASH_CLASS) pTable,
+ {
+ // Get index of the first entry and the count of entries in the bucket.
+ DWORD dwEntryId, cEntries;
+- pEntries->m_pBuckets->GetBucket(i, &dwEntryId, &cEntries);
++ pTable->GetPersistedBuckets(pEntries)->GetBucket(i, &dwEntryId, &cEntries);
+
+ // Loop over entries.
+ while (cEntries && (CHECK_OPT(SLIM_MODULE_TBLS)
+@@ -4559,7 +4559,7 @@ void NativeImageDumper::TraverseNgenPersistedEntries(DPTR(HASH_CLASS) pTable,
+ || CHECK_OPT(METHODTABLES)))
+ {
+ // Lookup entry in the array via the index we have.
+- typename HASH_CLASS::PTR_PersistedEntry pEntry(PTR_TO_TADDR(pEntries->m_pEntries) +
++ typename HASH_CLASS::PTR_PersistedEntry pEntry(PTR_TO_TADDR(pTable->GetPersistedEntries(pEntries)) +
+ (dwEntryId * sizeof(typename HASH_CLASS::PersistedEntry)));
+
+ IF_OPT(SLIM_MODULE_TBLS)
+@@ -8297,7 +8297,7 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ EEClass, EECLASSES );
+ #endif
+
+- WriteFieldMethodTable( m_pMethodTable, clazz->m_pMethodTable, EEClass,
++ WriteFieldMethodTable( m_pMethodTable, clazz->GetMethodTable(), EEClass,
+ EECLASSES );
+
+ WriteFieldCorElementType( m_NormType, (CorElementType)clazz->m_NormType,
+@@ -8558,7 +8558,7 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ DelegateEEClass, EECLASSES );
+
+ WriteFieldMethodDesc( m_pInvokeMethod,
+- delegateClass->m_pInvokeMethod,
++ delegateClass->GetInvokeMethod(),
+ DelegateEEClass, EECLASSES );
+ DumpFieldStub( m_pMultiCastInvokeStub,
+ delegateClass->m_pMultiCastInvokeStub,
+@@ -8585,10 +8585,10 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ }
+
+ WriteFieldMethodDesc( m_pBeginInvokeMethod,
+- delegateClass->m_pBeginInvokeMethod,
++ delegateClass->GetBeginInvokeMethod(),
+ DelegateEEClass, EECLASSES );
+ WriteFieldMethodDesc( m_pEndInvokeMethod,
+- delegateClass->m_pEndInvokeMethod,
++ delegateClass->GetEndInvokeMethod(),
+ DelegateEEClass, EECLASSES );
+ DisplayWriteFieldPointer( m_pMarshalStub, delegateClass->m_pMarshalStub,
+ DelegateEEClass, EECLASSES );
+@@ -8717,7 +8717,7 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ }
+ }
+ }
+- PTR_BYTE varianceInfo = TO_TADDR(pClassOptional->m_pVarianceInfo);
++ PTR_BYTE varianceInfo = pClassOptional->GetVarianceInfo();
+ if( varianceInfo == NULL )
+ {
+ DisplayWriteFieldPointer( m_pVarianceInfo, NULL,
+@@ -8879,7 +8879,7 @@ void NativeImageDumper::DumpTypeDesc( PTR_TypeDesc td )
+ PTR_TypeVarTypeDesc tvtd(td);
+ DisplayStartVStructure( "TypeVarTypeDesc", TYPEDESCS );
+ DisplayWriteFieldPointer( m_pModule,
+- DPtrToPreferredAddr(tvtd->m_pModule),
++ DPtrToPreferredAddr(tvtd->GetModule()),
+ TypeVarTypeDesc, TYPEDESCS );
+ DisplayWriteFieldUInt( m_typeOrMethodDef,
+ tvtd->m_typeOrMethodDef,
+diff --git a/src/vm/class.cpp b/src/vm/class.cpp
+index 2172090..0259b1e 100644
+--- a/src/vm/class.cpp
++++ b/src/vm/class.cpp
+@@ -2884,7 +2884,7 @@ void EEClass::Save(DataImage *image, MethodTable *pMT)
+ {
+ // make sure we don't store a GUID_NULL guid in the NGEN image
+ // instead we'll compute the GUID at runtime, and throw, if appropriate
+- m_pGuidInfo = NULL;
++ m_pGuidInfo.SetValueMaybeNull(NULL);
+ }
+ }
+ }
+@@ -2961,14 +2961,14 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+ }
+
+ if (HasOptionalFields())
+- image->FixupPointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pVarianceInfo));
++ image->FixupRelativePointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pVarianceInfo));
+
+ //
+ // We pass in the method table, because some classes (e.g. remoting proxy)
+ // have fake method tables set up in them & we want to restore the regular
+ // one.
+ //
+- image->FixupField(this, offsetof(EEClass, m_pMethodTable), pMT);
++ image->FixupField(this, offsetof(EEClass, m_pMethodTable), pMT, 0, IMAGE_REL_BASED_RelativePointer);
+
+ //
+ // Fixup MethodDescChunk and MethodDescs
+@@ -3043,9 +3043,9 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+ }
+ else if (IsDelegate())
+ {
+- image->FixupPointerField(this, offsetof(DelegateEEClass, m_pInvokeMethod));
+- image->FixupPointerField(this, offsetof(DelegateEEClass, m_pBeginInvokeMethod));
+- image->FixupPointerField(this, offsetof(DelegateEEClass, m_pEndInvokeMethod));
++ image->FixupRelativePointerField(this, offsetof(DelegateEEClass, m_pInvokeMethod));
++ image->FixupRelativePointerField(this, offsetof(DelegateEEClass, m_pBeginInvokeMethod));
++ image->FixupRelativePointerField(this, offsetof(DelegateEEClass, m_pEndInvokeMethod));
+
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pUMThunkMarshInfo));
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pStaticCallStub));
+@@ -3078,7 +3078,7 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+ //
+
+ if (IsInterface() && GetGuidInfo() != NULL)
+- image->FixupPointerField(this, offsetof(EEClass, m_pGuidInfo));
++ image->FixupRelativePointerField(this, offsetof(EEClass, m_pGuidInfo));
+ else
+ image->ZeroPointerField(this, offsetof(EEClass, m_pGuidInfo));
+
+diff --git a/src/vm/class.h b/src/vm/class.h
+index 6358624..13b2e50 100644
+--- a/src/vm/class.h
++++ b/src/vm/class.h
+@@ -676,7 +676,7 @@ class EEClassOptionalFields
+
+ // Variance info for each type parameter (gpNonVariant, gpCovariant, or gpContravariant)
+ // If NULL, this type has no type parameters that are co/contravariant
+- BYTE* m_pVarianceInfo;
++ RelativePointer<PTR_BYTE> m_pVarianceInfo;
+
+ //
+ // COM RELATED FIELDS.
+@@ -717,6 +717,13 @@ class EEClassOptionalFields
+
+ // Set default values for optional fields.
+ inline void Init();
++
++ PTR_BYTE GetVarianceInfo()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointerMaybeNull(this, &EEClassOptionalFields::m_pVarianceInfo);
++ }
+ };
+ typedef DPTR(EEClassOptionalFields) PTR_EEClassOptionalFields;
+
+@@ -1014,12 +1021,12 @@ public:
+ // will return the method table pointer corresponding to the "canonical"
+ // instantiation, as defined in typehandle.h.
+ //
+- inline MethodTable* GetMethodTable()
++ inline PTR_MethodTable GetMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+- return m_pMethodTable;
++ return ReadPointerMaybeNull(this, &EEClass::m_pMethodTable);
+ }
+
+ // DO NOT ADD ANY ASSERTS TO THIS METHOD.
+@@ -1036,14 +1043,14 @@ public:
+ CANNOT_HAVE_CONTRACT;
+ SUPPORTS_DAC;
+
+- return m_pMethodTable;
++ return ReadPointerMaybeNull(this, &EEClass::m_pMethodTable);
+ }
+-#ifndef DACCESS_COMPILE
+
++#ifndef DACCESS_COMPILE
+ inline void SetMethodTable(MethodTable* pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pMethodTable = pMT;
++ m_pMethodTable.SetValueMaybeNull(pMT);
+ }
+ #endif // !DACCESS_COMPILE
+
+@@ -1710,14 +1717,15 @@ public:
+ inline PTR_GuidInfo GetGuidInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return m_pGuidInfo;
++
++ return ReadPointerMaybeNull(this, &EEClass::m_pGuidInfo);
+ }
+
+ inline void SetGuidInfo(GuidInfo* pGuidInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ #ifndef DACCESS_COMPILE
+- *EnsureWritablePages(&m_pGuidInfo) = pGuidInfo;
++ EnsureWritablePages(&m_pGuidInfo)->SetValueMaybeNull(pGuidInfo);
+ #endif // DACCESS_COMPILE
+ }
+
+@@ -1879,6 +1887,7 @@ public:
+ GetOptionalFields()->m_pDictLayout = pLayout;
+ }
+
++#ifndef DACCESS_COMPILE
+ static CorGenericParamAttr GetVarianceOfTypeParameter(BYTE * pbVarianceInfo, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+@@ -1897,15 +1906,16 @@ public:
+ BYTE* GetVarianceInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+- return HasOptionalFields() ? GetOptionalFields()->m_pVarianceInfo : NULL;
++ return HasOptionalFields() ? GetOptionalFields()->GetVarianceInfo() : NULL;
+ }
+
+ void SetVarianceInfo(BYTE *pVarianceInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+- GetOptionalFields()->m_pVarianceInfo = pVarianceInfo;
++ GetOptionalFields()->m_pVarianceInfo.SetValueMaybeNull(pVarianceInfo);
+ }
++#endif // !DACCESS_COMPILE
+
+ // Check that a signature blob uses type parameters correctly
+ // in accordance with the variance annotations specified by this class
+@@ -2145,7 +2155,7 @@ public:
+ // C_ASSERTs in Jitinterface.cpp need this to be public to check the offset.
+ // Put it first so the offset rarely changes, which just reduces the number of times we have to fiddle
+ // with the offset.
+- PTR_GuidInfo m_pGuidInfo; // The cached guid inforation for interfaces.
++ RelativePointer<PTR_GuidInfo> m_pGuidInfo; // The cached guid information for interfaces.
+
+ #ifdef _DEBUG
+ public:
+@@ -2159,7 +2169,7 @@ private:
+ RelativePointer<PTR_EEClassOptionalFields> m_rpOptionalFields;
+
+ // TODO: Remove this field. It is only used by SOS and object validation for stress.
+- PTR_MethodTable m_pMethodTable;
++ RelativePointer<PTR_MethodTable> m_pMethodTable;
+
+ RelativePointer<PTR_FieldDesc> m_pFieldDescList;
+ RelativePointer<PTR_MethodDescChunk> m_pChunks;
+@@ -2355,15 +2365,15 @@ struct ComPlusCallInfo;
+ class DelegateEEClass : public EEClass
+ {
+ public:
+- PTR_Stub m_pStaticCallStub;
+- PTR_Stub m_pInstRetBuffCallStub;
+- PTR_MethodDesc m_pInvokeMethod;
+- PTR_Stub m_pMultiCastInvokeStub;
+- PTR_Stub m_pSecureDelegateInvokeStub;
+- UMThunkMarshInfo* m_pUMThunkMarshInfo;
+- PTR_MethodDesc m_pBeginInvokeMethod;
+- PTR_MethodDesc m_pEndInvokeMethod;
+- Volatile<PCODE> m_pMarshalStub;
++ PTR_Stub m_pStaticCallStub;
++ PTR_Stub m_pInstRetBuffCallStub;
++ RelativePointer<PTR_MethodDesc> m_pInvokeMethod;
++ PTR_Stub m_pMultiCastInvokeStub;
++ PTR_Stub m_pSecureDelegateInvokeStub;
++ UMThunkMarshInfo* m_pUMThunkMarshInfo;
++ RelativePointer<PTR_MethodDesc> m_pBeginInvokeMethod;
++ RelativePointer<PTR_MethodDesc> m_pEndInvokeMethod;
++ Volatile<PCODE> m_pMarshalStub;
+
+ #ifdef FEATURE_COMINTEROP
+ ComPlusCallInfo *m_pComPlusCallInfo;
+@@ -2376,6 +2386,21 @@ public:
+ MethodDesc* m_pForwardStubMD; // marshaling stub for calls to unmanaged code
+ MethodDesc* m_pReverseStubMD; // marshaling stub for calls from unmanaged code
+
++ PTR_MethodDesc GetInvokeMethod()
++ {
++ return ReadPointer(this, &DelegateEEClass::m_pInvokeMethod);
++ }
++
++ PTR_MethodDesc GetBeginInvokeMethod()
++ {
++ return ReadPointer(this, &DelegateEEClass::m_pBeginInvokeMethod);
++ }
++
++ PTR_MethodDesc GetEndInvokeMethod()
++ {
++ return ReadPointer(this, &DelegateEEClass::m_pEndInvokeMethod);
++ }
++
+ #ifndef DACCESS_COMPILE
+ DelegateEEClass() : EEClass(sizeof(DelegateEEClass))
+ {
+diff --git a/src/vm/class.inl b/src/vm/class.inl
+index 1a7e169..755463d 100644
+--- a/src/vm/class.inl
++++ b/src/vm/class.inl
+@@ -39,7 +39,7 @@ inline void EEClassOptionalFields::Init()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pDictLayout = NULL;
+- m_pVarianceInfo = NULL;
++ m_pVarianceInfo.SetValueMaybeNull(NULL);
+ #ifdef FEATURE_COMINTEROP
+ m_pSparseVTableMap = NULL;
+ m_pCoClassForIntf = TypeHandle();
+diff --git a/src/vm/classhash.cpp b/src/vm/classhash.cpp
+index 2ffc612..31c7e84 100644
+--- a/src/vm/classhash.cpp
++++ b/src/vm/classhash.cpp
+@@ -145,7 +145,7 @@ VOID EEClassHashTable::UncompressModuleAndNonExportClassDef(HashDatum Data, Modu
+ _ASSERTE(!(dwData & EECLASSHASH_MDEXPORT_DISCR));
+
+ *pCL = ((dwData >> 1) & 0x00ffffff) | mdtTypeDef;
+- *ppModule = m_pModule;
++ *ppModule = GetModule();
+ }
+
+ bool EEClassHashTable::UncompressModuleAndClassDef(HashDatum Data, Loader::LoadFlag loadFlag,
+@@ -172,8 +172,7 @@ bool EEClassHashTable::UncompressModuleAndClassDef(HashDatum Data, Loader::LoadF
+ if(dwData & EECLASSHASH_MDEXPORT_DISCR) {
+ *pmdFoundExportedType = ((dwData >> 1) & 0x00ffffff) | mdtExportedType;
+
+- *ppModule = m_pModule->GetAssembly()->
+- FindModuleByExportedType(*pmdFoundExportedType, loadFlag, mdTypeDefNil, pCL);
++ *ppModule = GetModule()->GetAssembly()->FindModuleByExportedType(*pmdFoundExportedType, loadFlag, mdTypeDefNil, pCL);
+ }
+ else {
+ UncompressModuleAndNonExportClassDef(Data, ppModule, pCL);
+@@ -232,7 +231,7 @@ VOID EEClassHashTable::ConstructKeyFromData(PTR_EEClassHashEntry pEntry, // IN
+ // in this case, the lifetime of Key is bounded by the lifetime of cqb, which will free the memory
+ // it allocated on destruction.
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ LPSTR pszName = NULL;
+ LPSTR pszNameSpace = NULL;
+ IMDInternalImport *pInternalImport = NULL;
+@@ -259,7 +258,7 @@ VOID EEClassHashTable::ConstructKeyFromData(PTR_EEClassHashEntry pEntry, // IN
+ mdToken mdtUncompressed = UncompressModuleAndClassDef(Data);
+ if (TypeFromToken(mdtUncompressed) == mdtExportedType)
+ {
+- IfFailThrow(m_pModule->GetClassLoader()->GetAssembly()->GetManifestImport()->GetExportedTypeProps(
++ IfFailThrow(GetModule()->GetClassLoader()->GetAssembly()->GetManifestImport()->GetExportedTypeProps(
+ mdtUncompressed,
+ (LPCSTR *)&pszNameSpace,
+ (LPCSTR *)&pszName,
+@@ -355,7 +354,7 @@ EEClassHashEntry_t *EEClassHashTable::InsertValue(LPCUTF8 pszNamespace, LPCUTF8
+
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ EEClassHashEntry *pEntry = BaseAllocateEntry(pamTracker);
+
+@@ -433,10 +432,9 @@ EEClassHashEntry_t *EEClassHashTable::InsertValueIfNotFound(LPCUTF8 pszNamespace
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+- _ASSERTE(m_pModule);
+
+ EEClassHashEntry_t * pNewEntry = FindItem(pszNamespace, pszClassName, IsNested, NULL);
+
+@@ -479,7 +477,7 @@ EEClassHashEntry_t *EEClassHashTable::FindItem(LPCUTF8 pszNamespace, LPCUTF8 psz
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+
+@@ -533,7 +531,7 @@ EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(NameHandle* pName, PTR
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE(pName);
+
+ if (pName->GetNameSpace())
+@@ -564,7 +562,7 @@ EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(LPCUTF8 pszNamespace,
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ PTR_EEClassHashEntry pSearch = BaseFindNextEntryByHash(pContext);
+
+@@ -597,7 +595,7 @@ EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(LPCUTF8 pszFullyQualif
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ CQuickBytes szNamespace;
+
+@@ -639,7 +637,7 @@ EEClassHashEntry_t * EEClassHashTable::GetValue(LPCUTF8 pszFullyQualifiedName, P
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ CQuickBytes szNamespace;
+
+@@ -685,7 +683,7 @@ EEClassHashEntry_t * EEClassHashTable::GetValue(LPCUTF8 pszNamespace, LPCUTF8 ps
+ CONTRACTL_END;
+
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ EEClassHashEntry_t *pItem = FindItem(pszNamespace, pszClassName, IsNested, pContext);
+ if (pItem)
+ *pData = pItem->GetData();
+@@ -709,7 +707,7 @@ EEClassHashEntry_t * EEClassHashTable::GetValue(NameHandle* pName, PTR_VOID *pDa
+
+
+ _ASSERTE(pName);
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ if(pName->GetNameSpace() == NULL) {
+ return GetValue(pName->GetName(), pData, IsNested, pContext);
+ }
+@@ -753,7 +751,7 @@ BOOL EEClassHashTable::CompareKeys(PTR_EEClassHashEntry pEntry, LPCUTF8 * pKey2)
+ CONTRACTL_END;
+
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE (pEntry);
+ _ASSERTE (pKey2);
+
+@@ -778,7 +776,7 @@ void EEClassHashTable::Save(DataImage *image, CorProfileData *profileData)
+ STANDARD_VM_CONTRACT;
+
+ // See comment on PrepareExportedTypesForSaving for what's going on here.
+- if (m_pModule->IsManifest())
++ if (GetModule()->IsManifest())
+ PrepareExportedTypesForSaving(image);
+
+ // The base class handles most of the saving logic (it controls the layout of the hash memory). It will
+@@ -876,11 +874,11 @@ void EEClassHashTable::PrepareExportedTypesForSaving(DataImage *image)
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(GetAppDomain()->IsCompilationDomain());
+- PRECONDITION(m_pModule->IsManifest());
++ PRECONDITION(GetModule()->IsManifest());
+ }
+ CONTRACTL_END
+
+- IMDInternalImport *pImport = m_pModule->GetMDImport();
++ IMDInternalImport *pImport = GetModule()->GetMDImport();
+
+ HENUMInternalHolder phEnum(pImport);
+ phEnum.EnumInit(mdtExportedType, mdTokenNil);
+@@ -900,7 +898,7 @@ void EEClassHashTable::PrepareExportedTypesForSaving(DataImage *image)
+ &typeDef,
+ &dwFlags)))
+ {
+- THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, m_pModule);
++ THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, GetModule());
+ continue;
+ }
+
+@@ -936,16 +934,19 @@ void EEClassHashTable::PrepareExportedTypesForSaving(DataImage *image)
+ // "CompareNestedEntryWithExportedType" will check if "pEntry->pEncloser" is a type of "mdImpl",
+ // as well as walking up the enclosing chain.
+ _ASSERTE (TypeFromToken(mdImpl) == mdtExportedType);
+- while ((!m_pModule->GetClassLoader()->CompareNestedEntryWithExportedType(pImport,
+- mdImpl,
+- this,
+- pEntry->GetEncloser())) &&
+- (pEntry = FindNextNestedClass(pszNameSpace, pszName, &data, &sContext)) != NULL);
++ while ((!GetModule()->GetClassLoader()->CompareNestedEntryWithExportedType(pImport,
++ mdImpl,
++ this,
++ pEntry->GetEncloser()))
++ && (pEntry = FindNextNestedClass(pszNameSpace, pszName, &data, &sContext)) != NULL)
++ {
++ ;
++ }
+ }
+ }
+
+ if (!pEntry) {
+- THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, m_pModule);
++ THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, GetModule());
+ continue;
+ }
+
+@@ -1057,8 +1058,8 @@ EEClassHashTable *EEClassHashTable::MakeCaseInsensitiveTable(Module *pModule, Al
+
+
+
+- _ASSERTE(m_pModule);
+- _ASSERTE (pModule == m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
++ _ASSERTE(pModule == GetModule());
+
+ // Allocate the table and verify that we actually got one.
+ EEClassHashTable * pCaseInsTable = EEClassHashTable::Create(pModule,
+diff --git a/src/vm/comcallablewrapper.cpp b/src/vm/comcallablewrapper.cpp
+index 540c708..719d5ba 100644
+--- a/src/vm/comcallablewrapper.cpp
++++ b/src/vm/comcallablewrapper.cpp
+@@ -5044,7 +5044,7 @@ void ComMethodTable::LayOutDelegateMethodTable()
+
+ // Some space for a CALL xx xx xx xx stub is reserved before the beginning of the MethodDesc
+ ComCallMethodDescHolder NewMDHolder = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+- MethodDesc* pInvokeMD = ((DelegateEEClass *)(pDelegateMT->GetClass()))->m_pInvokeMethod;
++ MethodDesc* pInvokeMD = ((DelegateEEClass *)(pDelegateMT->GetClass()))->GetInvokeMethod();
+
+ if (pInvokeMD->IsSharedByGenericInstantiations())
+ {
+diff --git a/src/vm/comdelegate.cpp b/src/vm/comdelegate.cpp
+index 2682c2d..e920a4a 100644
+--- a/src/vm/comdelegate.cpp
++++ b/src/vm/comdelegate.cpp
+@@ -581,7 +581,7 @@ ComPlusCallInfo * COMDelegate::PopulateComPlusCallInfo(MethodTable * pDelMT)
+ // We need a LoaderHeap that lives at least as long as the DelegateEEClass, but ideally no longer
+ LoaderHeap *DelegateEEClass::GetStubHeap()
+ {
+- return m_pInvokeMethod->GetLoaderAllocator()->GetStubHeap();
++ return GetInvokeMethod()->GetLoaderAllocator()->GetStubHeap();
+ }
+
+
+@@ -600,7 +600,7 @@ Stub* COMDelegate::SetupShuffleThunk(MethodTable * pDelMT, MethodDesc *pTargetMe
+
+ DelegateEEClass * pClass = (DelegateEEClass *)pDelMT->GetClass();
+
+- MethodDesc *pMD = pClass->m_pInvokeMethod;
++ MethodDesc *pMD = pClass->GetInvokeMethod();
+
+ StackSArray<ShuffleEntry> rShuffleEntryArray;
+ GenerateShuffleArray(pMD, pTargetMeth, &rShuffleEntryArray);
+@@ -2385,7 +2385,7 @@ PCODE COMDelegate::GetInvokeMethodStub(EEImplMethodDesc* pMD)
+ MethodTable * pDelMT = pMD->GetMethodTable();
+ DelegateEEClass* pClass = (DelegateEEClass*) pDelMT->GetClass();
+
+- if (pMD == pClass->m_pInvokeMethod)
++ if (pMD == pClass->GetInvokeMethod())
+ {
+ // Validate the invoke method, which at the moment just means checking the calling convention
+
+@@ -2401,7 +2401,7 @@ PCODE COMDelegate::GetInvokeMethodStub(EEImplMethodDesc* pMD)
+ // and not an invalid-delegate-layout condition.
+ //
+ // If the call was indeed for async delegate invocation, we will just throw an exception.
+- if ((pMD == pClass->m_pBeginInvokeMethod) || (pMD == pClass->m_pEndInvokeMethod))
++ if ((pMD == pClass->GetBeginInvokeMethod()) || (pMD == pClass->GetEndInvokeMethod()))
+ {
+ COMPlusThrow(kPlatformNotSupportedException);
+ }
+@@ -2525,7 +2525,7 @@ DELEGATEREF COMDelegate::CreateSecureDelegate(DELEGATEREF delegate, MethodDesc*
+ CONTRACTL_END;
+
+ MethodTable *pDelegateType = delegate->GetMethodTable();
+- MethodDesc *pMD = ((DelegateEEClass*)(pDelegateType->GetClass()))->m_pInvokeMethod;
++ MethodDesc *pMD = ((DelegateEEClass*)(pDelegateType->GetClass()))->GetInvokeMethod();
+ // allocate the object
+ struct _gc {
+ DELEGATEREF refSecDel;
+@@ -2625,7 +2625,7 @@ FCIMPL1(MethodDesc*, COMDelegate::GetInvokeMethod, Object* refThisIn)
+ OBJECTREF refThis = ObjectToOBJECTREF(refThisIn);
+ MethodTable * pDelMT = refThis->GetMethodTable();
+
+- MethodDesc* pMD = ((DelegateEEClass*)(pDelMT->GetClass()))->m_pInvokeMethod;
++ MethodDesc* pMD = ((DelegateEEClass*)(pDelMT->GetClass()))->GetInvokeMethod();
+ _ASSERTE(pMD);
+ return pMD;
+ }
+@@ -2643,7 +2643,7 @@ FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
+ Stub *pStub = delegateEEClass->m_pMultiCastInvokeStub;
+ if (pStub == NULL)
+ {
+- MethodDesc* pMD = delegateEEClass->m_pInvokeMethod;
++ MethodDesc* pMD = delegateEEClass->GetInvokeMethod();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+@@ -2767,7 +2767,7 @@ FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
+ Stub *pStub = delegateEEClass->m_pMultiCastInvokeStub;
+ if (pStub == NULL)
+ {
+- MethodDesc* pMD = delegateEEClass->m_pInvokeMethod;
++ MethodDesc* pMD = delegateEEClass->GetInvokeMethod();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+@@ -3101,7 +3101,7 @@ MethodDesc* COMDelegate::FindDelegateInvokeMethod(MethodTable *pMT)
+
+ _ASSERTE(pMT->IsDelegate());
+
+- MethodDesc * pMD = ((DelegateEEClass*)pMT->GetClass())->m_pInvokeMethod;
++ MethodDesc * pMD = ((DelegateEEClass*)pMT->GetClass())->GetInvokeMethod();
+ if (pMD == NULL)
+ COMPlusThrowNonLocalized(kMissingMethodException, W("Invoke"));
+ return pMD;
+@@ -3114,7 +3114,7 @@ BOOL COMDelegate::IsDelegateInvokeMethod(MethodDesc *pMD)
+ MethodTable *pMT = pMD->GetMethodTable();
+ _ASSERTE(pMT->IsDelegate());
+
+- return (pMD == ((DelegateEEClass *)pMT->GetClass())->m_pInvokeMethod);
++ return (pMD == ((DelegateEEClass *)pMT->GetClass())->GetInvokeMethod());
+ }
+
+ BOOL COMDelegate::IsMethodDescCompatible(TypeHandle thFirstArg,
+@@ -3667,7 +3667,7 @@ BOOL COMDelegate::ValidateCtor(TypeHandle instHnd,
+
+ DelegateEEClass *pdlgEEClass = (DelegateEEClass*)dlgtHnd.AsMethodTable()->GetClass();
+ PREFIX_ASSUME(pdlgEEClass != NULL);
+- MethodDesc *pDlgtInvoke = pdlgEEClass->m_pInvokeMethod;
++ MethodDesc *pDlgtInvoke = pdlgEEClass->GetInvokeMethod();
+ if (pDlgtInvoke == NULL)
+ return FALSE;
+ return IsMethodDescCompatible(instHnd, ftnParentHnd, pFtn, dlgtHnd, pDlgtInvoke, DBF_RelaxedSignature, pfIsOpenDelegate);
+@@ -3716,18 +3716,18 @@ BOOL COMDelegate::ValidateBeginInvoke(DelegateEEClass* pClass)
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pClass));
+- PRECONDITION(CheckPointer(pClass->m_pBeginInvokeMethod));
++ PRECONDITION(CheckPointer(pClass->GetBeginInvokeMethod()));
+
+ // insert fault. Can the binder throw an OOM?
+ }
+ CONTRACTL_END;
+
+- if (pClass->m_pInvokeMethod == NULL)
++ if (pClass->GetInvokeMethod() == NULL)
+ return FALSE;
+
+ // We check the signatures under the typical instantiation of the possibly generic class
+- MetaSig beginInvokeSig(pClass->m_pBeginInvokeMethod->LoadTypicalMethodDefinition());
+- MetaSig invokeSig(pClass->m_pInvokeMethod->LoadTypicalMethodDefinition());
++ MetaSig beginInvokeSig(pClass->GetBeginInvokeMethod()->LoadTypicalMethodDefinition());
++ MetaSig invokeSig(pClass->GetInvokeMethod()->LoadTypicalMethodDefinition());
+
+ if (beginInvokeSig.GetCallingConventionInfo() != (IMAGE_CEE_CS_CALLCONV_HASTHIS | IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ return FALSE;
+@@ -3768,18 +3768,18 @@ BOOL COMDelegate::ValidateEndInvoke(DelegateEEClass* pClass)
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pClass));
+- PRECONDITION(CheckPointer(pClass->m_pEndInvokeMethod));
++ PRECONDITION(CheckPointer(pClass->GetEndInvokeMethod()));
+
+ // insert fault. Can the binder throw an OOM?
+ }
+ CONTRACTL_END;
+
+- if (pClass->m_pInvokeMethod == NULL)
++ if (pClass->GetInvokeMethod() == NULL)
+ return FALSE;
+
+ // We check the signatures under the typical instantiation of the possibly generic class
+- MetaSig endInvokeSig(pClass->m_pEndInvokeMethod->LoadTypicalMethodDefinition());
+- MetaSig invokeSig(pClass->m_pInvokeMethod->LoadTypicalMethodDefinition());
++ MetaSig endInvokeSig(pClass->GetEndInvokeMethod()->LoadTypicalMethodDefinition());
++ MetaSig invokeSig(pClass->GetInvokeMethod()->LoadTypicalMethodDefinition());
+
+ if (endInvokeSig.GetCallingConventionInfo() != (IMAGE_CEE_CS_CALLCONV_HASTHIS | IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ return FALSE;
+diff --git a/src/vm/comsynchronizable.cpp b/src/vm/comsynchronizable.cpp
+index 08b5281..01ba496 100644
+--- a/src/vm/comsynchronizable.cpp
++++ b/src/vm/comsynchronizable.cpp
+@@ -235,7 +235,7 @@ void ThreadNative::KickOffThread_Worker(LPVOID ptr)
+ delete args->share;
+ args->share = 0;
+
+- MethodDesc *pMeth = ((DelegateEEClass*)( gc.orDelegate->GetMethodTable()->GetClass() ))->m_pInvokeMethod;
++ MethodDesc *pMeth = ((DelegateEEClass*)( gc.orDelegate->GetMethodTable()->GetClass() ))->GetInvokeMethod();
+ _ASSERTE(pMeth);
+ MethodDescCallSite invokeMethod(pMeth, &gc.orDelegate);
+
+diff --git a/src/vm/instmethhash.cpp b/src/vm/instmethhash.cpp
+index 250a6d5..560e955 100644
+--- a/src/vm/instmethhash.cpp
++++ b/src/vm/instmethhash.cpp
+@@ -86,8 +86,8 @@ PTR_LoaderAllocator InstMethodHashTable::GetLoaderAllocator()
+ }
+ else
+ {
+- _ASSERTE(m_pModule != NULL);
+- return m_pModule->GetLoaderAllocator();
++ _ASSERTE(!m_pModule.IsNull());
++ return GetModule()->GetLoaderAllocator();
+ }
+ }
+
+@@ -188,7 +188,7 @@ MethodDesc* InstMethodHashTable::FindMethodDesc(TypeHandle declaringType,
+ RelativeFixupPointer<PTR_MethodTable> * ppMT = pMD->GetMethodTablePtr();
+ TADDR pMT = ppMT->GetValueMaybeTagged((TADDR)ppMT);
+
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, pMT, declaringType))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), pMT, declaringType))
+ {
+ continue; // Next iteration of the for loop
+ }
+@@ -208,7 +208,7 @@ MethodDesc* InstMethodHashTable::FindMethodDesc(TypeHandle declaringType,
+ // asserts on encoded fixups.
+ TADDR candidateArg = ((FixupPointer<TADDR> *)candidateInst.GetRawArgs())[i].GetValue();
+
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, inst[i]))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), candidateArg, inst[i]))
+ {
+ match = false;
+ break;
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index 08965a7..84f635a 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -5286,7 +5286,7 @@ void CEEInfo::getCallInfo(
+ // 2) Delegate.Invoke() - since a Delegate is a sealed class as per ECMA spec
+ // 3) JIT intrinsics - since they have pre-defined behavior
+ devirt = pTargetMD->GetMethodTable()->IsValueType() ||
+- (pTargetMD->GetMethodTable()->IsDelegate() && ((DelegateEEClass*)(pTargetMD->GetMethodTable()->GetClass()))->m_pInvokeMethod == pMD) ||
++ (pTargetMD->GetMethodTable()->IsDelegate() && ((DelegateEEClass*)(pTargetMD->GetMethodTable()->GetClass()))->GetInvokeMethod() == pMD) ||
+ (pTargetMD->IsFCall() && ECall::GetIntrinsicID(pTargetMD) != CORINFO_INTRINSIC_Illegal);
+
+ callVirtCrossingVersionBubble = true;
+@@ -6705,7 +6705,7 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
+ result |= CORINFO_FLG_FORCEINLINE;
+ }
+
+- if (pMT->IsDelegate() && ((DelegateEEClass*)(pMT->GetClass()))->m_pInvokeMethod == pMD)
++ if (pMT->IsDelegate() && ((DelegateEEClass*)(pMT->GetClass()))->GetInvokeMethod() == pMD)
+ {
+ // This is now used to emit efficient invoke code for any delegate invoke,
+ // including multicast.
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index fdf4f48..05ab438 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -6156,18 +6156,18 @@ MethodTableBuilder::InitMethodDesc(
+
+ if (strcmp(pMethodName, "Invoke") == 0)
+ {
+- BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod);
+- ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod = pNewMD;
++ BAD_FORMAT_NOTHROW_ASSERT(((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod.IsNull());
++ ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod.SetValue(pNewMD);
+ }
+ else if (strcmp(pMethodName, "BeginInvoke") == 0)
+ {
+- BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod);
+- ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod = pNewMD;
++ BAD_FORMAT_NOTHROW_ASSERT(((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod.IsNull());
++ ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod.SetValue(pNewMD);
+ }
+ else if (strcmp(pMethodName, "EndInvoke") == 0)
+ {
+- BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod);
+- ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod = pNewMD;
++ BAD_FORMAT_NOTHROW_ASSERT(((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod.IsNull());
++ ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod.SetValue(pNewMD);
+ }
+ else
+ {
+@@ -10332,7 +10332,7 @@ MethodTableBuilder::SetupMethodTable2(
+ GetMemTracker());
+
+ pMT->SetClass(pClass);
+- pClass->m_pMethodTable = pMT;
++ pClass->m_pMethodTable.SetValue(pMT);
+ m_pHalfBakedMT = pMT;
+
+ #ifdef _DEBUG
+diff --git a/src/vm/ngenhash.h b/src/vm/ngenhash.h
+index 667a55e..c59eb8e 100644
+--- a/src/vm/ngenhash.h
++++ b/src/vm/ngenhash.h
+@@ -203,7 +203,7 @@ protected:
+ private:
+ friend class NgenHashTable<NGEN_HASH_ARGS>;
+
+- NgenHashTable<NGEN_HASH_ARGS> *m_pTable; // Pointer back to the table being enumerated.
++ DPTR(NgenHashTable<NGEN_HASH_ARGS>) m_pTable; // Pointer back to the table being enumerated.
+ TADDR m_pEntry; // The entry the caller is currently looking at (or
+ // NULL to begin with). This is a VolatileEntry* or
+ // PersistedEntry* (depending on m_eType below) and
+@@ -303,8 +303,13 @@ protected:
+ void BaseEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ #endif // DACCESS_COMPILE
+
++ PTR_Module GetModule()
++ {
++ return ReadPointerMaybeNull(this, &NgenHashTable<NGEN_HASH_ARGS>::m_pModule);
++ }
++
+ // Owning module set at hash creation time (possibly NULL if this hash instance is not to be ngen'd).
+- PTR_Module m_pModule;
++ RelativePointer<PTR_Module> m_pModule;
+
+ private:
+ // Internal implementation details. Nothing of interest to sub-classers for here on.
+@@ -385,13 +390,13 @@ private:
+ // because this logic is replicated for Hot and Cold entries so we can factor some common code.
+ struct PersistedEntries
+ {
+- APTR_PersistedEntry m_pEntries; // Pointer to a contiguous block of PersistedEntry structures
+- // (NULL if zero entries)
+- PTR_PersistedBucketList m_pBuckets; // Pointer to abstracted bucket list mapping above entries
+- // into a hash (NULL if zero buckets, which is iff zero
+- // entries)
+- DWORD m_cEntries; // Count of entries in the above block
+- DWORD m_cBuckets; // Count of buckets in the above bucket list
++ RelativePointer<APTR_PersistedEntry> m_pEntries; // Pointer to a contiguous block of PersistedEntry structures
++ // (NULL if zero entries)
++ RelativePointer<PTR_PersistedBucketList> m_pBuckets; // Pointer to abstracted bucket list mapping above entries
++ // into a hash (NULL if zero buckets, which is iff zero
++ // entries)
++ DWORD m_cEntries; // Count of entries in the above block
++ DWORD m_cBuckets; // Count of buckets in the above bucket list
+ };
+ #endif // FEATURE_PREJIT
+
+@@ -439,13 +444,98 @@ private:
+ DWORD NextLargestPrime(DWORD dwNumber);
+ #endif // !DACCESS_COMPILE
+
++ DPTR(PTR_VolatileEntry) GetWarmBuckets()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointer(this, &NgenHashTable<NGEN_HASH_ARGS>::m_pWarmBuckets);
++ }
++
++#ifdef FEATURE_PREJIT
++ APTR_PersistedEntry GetPersistedHotEntries()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries)::m_pEntries);
++ }
++
++ PTR_PersistedBucketList GetPersistedHotBuckets()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries)::m_pBuckets);
++ }
++
++ APTR_PersistedEntry GetPersistedColdEntries()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries)::m_pEntries);
++ }
++
++ PTR_PersistedBucketList GetPersistedColdBuckets()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries)::m_pBuckets);
++ }
++
++#ifdef DACCESS_COMPILE
++ APTR_PersistedEntry GetPersistedEntries(DPTR(PersistedEntries) pEntries)
++ {
++ SUPPORTS_DAC;
++
++ TADDR hotEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries);
++ TADDR coldEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries);
++
++ if (hotEntriesAddr == dac_cast<TADDR>(pEntries))
++ {
++ return GetPersistedHotEntries();
++ }
++ else
++ {
++ _ASSERTE(coldEntriesAddr == dac_cast<TADDR>(pEntries));
++
++ return GetPersistedColdEntries();
++ }
++ }
++
++ PTR_PersistedBucketList GetPersistedBuckets(DPTR(PersistedEntries) pEntries)
++ {
++ SUPPORTS_DAC;
++
++ TADDR hotEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries);
++ TADDR coldEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries);
++
++ if (hotEntriesAddr == dac_cast<TADDR>(pEntries))
++ {
++ return GetPersistedHotBuckets();
++ }
++ else
++ {
++ _ASSERTE(coldEntriesAddr == dac_cast<TADDR>(pEntries));
++
++ return GetPersistedColdBuckets();
++ }
++ }
++#endif // DACCESS_COMPILE
++#endif // FEATURE_PREJIT
++
+ // Loader heap provided at construction time. May be NULL (in which case m_pModule must *not* be NULL).
+ LoaderHeap *m_pHeap;
+
+ // Fields related to the runtime (volatile or warm) part of the hash.
+- DPTR(PTR_VolatileEntry) m_pWarmBuckets; // Pointer to a simple bucket list (array of VolatileEntry pointers)
+- DWORD m_cWarmBuckets; // Count of buckets in the above array (always non-zero)
+- DWORD m_cWarmEntries; // Count of elements in the warm section of the hash
++ RelativePointer<DPTR(PTR_VolatileEntry)> m_pWarmBuckets; // Pointer to a simple bucket list (array of VolatileEntry pointers)
++ DWORD m_cWarmBuckets; // Count of buckets in the above array (always non-zero)
++ DWORD m_cWarmEntries; // Count of elements in the warm section of the hash
+
+ #ifdef FEATURE_PREJIT
+ PersistedEntries m_sHotEntries; // Hot persisted hash entries (if any)
+diff --git a/src/vm/ngenhash.inl b/src/vm/ngenhash.inl
+index 070b1da..6e55345 100644
+--- a/src/vm/ngenhash.inl
++++ b/src/vm/ngenhash.inl
+@@ -48,14 +48,14 @@ NgenHashTable<NGEN_HASH_ARGS>::NgenHashTable(Module *pModule, LoaderHeap *pHeap,
+ // At least one of module or heap must have been specified or we won't know how to allocate entries and
+ // buckets.
+ _ASSERTE(pModule || pHeap);
+- m_pModule = pModule;
++ m_pModule.SetValueMaybeNull(pModule);
+ m_pHeap = pHeap;
+
+ S_SIZE_T cbBuckets = S_SIZE_T(sizeof(VolatileEntry*)) * S_SIZE_T(cInitialBuckets);
+
+ m_cWarmEntries = 0;
+ m_cWarmBuckets = cInitialBuckets;
+- m_pWarmBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets);
++ m_pWarmBuckets.SetValue((PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(m_pWarmBuckets, 0, sizeof(VolatileEntry*) * cInitialBuckets);
+@@ -83,7 +83,7 @@ VALUE *NgenHashTable<NGEN_HASH_ARGS>::BaseAllocateEntry(AllocMemTracker *pamTrac
+
+ // Faults are forbidden in BaseInsertEntry. Make the table writeable now that the faults are still allowed.
+ EnsureWritablePages(this);
+- EnsureWritablePages(this->m_pWarmBuckets, m_cWarmBuckets * sizeof(PTR_VolatileEntry));
++ EnsureWritablePages(this->GetWarmBuckets(), m_cWarmBuckets * sizeof(PTR_VolatileEntry));
+
+ TaggedMemAllocPtr pMemory = GetHeap()->AllocMem(S_SIZE_T(sizeof(VolatileEntry)));
+
+@@ -119,8 +119,8 @@ LoaderHeap *NgenHashTable<NGEN_HASH_ARGS>::GetHeap()
+
+ // If not specified then we fall back to the owning module's heap (a module must have been specified in
+ // this case).
+- _ASSERTE(m_pModule != NULL);
+- return m_pModule->GetAssembly()->GetLowFrequencyHeap();
++ _ASSERTE(!m_pModule.IsNull());
++ return GetModule()->GetAssembly()->GetLowFrequencyHeap();
+ }
+
+ // Insert an entry previously allocated via BaseAllocateEntry (you cannot allocated entries in any other
+@@ -154,13 +154,13 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseInsertEntry(NgenHashValue iHash, VALUE *
+ DWORD dwBucket = iHash % m_cWarmBuckets;
+
+ // Prepare to link the new entry at the head of the bucket chain.
+- pVolatileEntry->m_pNextEntry = m_pWarmBuckets[dwBucket];
++ pVolatileEntry->m_pNextEntry = (GetWarmBuckets())[dwBucket];
+
+ // Make sure that all writes to the entry are visible before publishing the entry.
+ MemoryBarrier();
+
+ // Publish the entry by pointing the bucket at it.
+- m_pWarmBuckets[dwBucket] = pVolatileEntry;
++ (GetWarmBuckets())[dwBucket] = pVolatileEntry;
+
+ m_cWarmEntries++;
+
+@@ -205,7 +205,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
+ // again.
+ for (DWORD i = 0; i < m_cWarmBuckets; i++)
+ {
+- PTR_VolatileEntry pEntry = m_pWarmBuckets[i];
++ PTR_VolatileEntry pEntry = (GetWarmBuckets())[i];
+
+ // Try to lock out readers from scanning this bucket. This is obviously a race which may fail.
+ // However, note that it's OK if somebody is already in the list - it's OK if we mess with the bucket
+@@ -213,7 +213,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
+ // comparison even if it wanders aimlessly amongst entries while we are rearranging things. If a
+ // lookup finds a match under those circumstances, great. If not, they will have to acquire the lock &
+ // try again anyway.
+- m_pWarmBuckets[i] = NULL;
++ (GetWarmBuckets())[i] = NULL;
+
+ while (pEntry != NULL)
+ {
+@@ -229,7 +229,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
+
+ // Make sure that all writes are visible before publishing the new array.
+ MemoryBarrier();
+- m_pWarmBuckets = pNewBuckets;
++ m_pWarmBuckets.SetValue(pNewBuckets);
+
+ // The new number of buckets has to be published last (prior to this readers may miscalculate a bucket
+ // index, but the result will always be in range and they'll simply walk the wrong chain and get a miss,
+@@ -697,7 +697,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ // Persisted hashes had better have supplied an owning module at creation time (otherwise we won't know
+ // how to find a loader heap for further allocations at runtime: we don't know how to serialize a loader
+ // heap pointer).
+- _ASSERTE(m_pModule != NULL);
++ _ASSERTE(!m_pModule.IsNull());
+
+ // We can only save once during ngen so the hot and cold sections of the hash cannot have been populated
+ // yet.
+@@ -732,7 +732,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ for (i = 0; i < m_cWarmBuckets; i++)
+ {
+ // Iterate through the chain of warm entries for this bucket.
+- VolatileEntry *pOldEntry = m_pWarmBuckets[i];
++ VolatileEntry *pOldEntry = (GetWarmBuckets())[i];
+ while (pOldEntry)
+ {
+ // Is the current entry being saved into the image?
+@@ -842,18 +842,18 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ {
+ m_sHotEntries.m_cEntries = cHotEntries;
+ m_sHotEntries.m_cBuckets = cHotBuckets;
+- m_sHotEntries.m_pEntries = new PersistedEntry[cHotEntries];
+- m_sHotEntries.m_pBuckets = PersistedBucketList::CreateList(cHotBuckets, cHotEntries, cMaxHotChain);
+- memset(m_sHotEntries.m_pEntries, 0, cHotEntries * sizeof(PersistedEntry)); // NGen determinism
++ m_sHotEntries.m_pEntries.SetValue(new PersistedEntry[cHotEntries]);
++ m_sHotEntries.m_pBuckets.SetValue(PersistedBucketList::CreateList(cHotBuckets, cHotEntries, cMaxHotChain));
++ memset(GetPersistedHotEntries(), 0, cHotEntries * sizeof(PersistedEntry)); // NGen determinism
+ }
+
+ if (cColdEntries)
+ {
+ m_sColdEntries.m_cEntries = cColdEntries;
+ m_sColdEntries.m_cBuckets = cColdBuckets;
+- m_sColdEntries.m_pEntries = new PersistedEntry[cColdEntries];
+- m_sColdEntries.m_pBuckets = PersistedBucketList::CreateList(cColdBuckets, cColdEntries, cMaxColdChain);
+- memset(m_sColdEntries.m_pEntries, 0, cColdEntries * sizeof(PersistedEntry)); // NGen determinism
++ m_sColdEntries.m_pEntries.SetValue(new PersistedEntry[cColdEntries]);
++ m_sColdEntries.m_pBuckets.SetValue(PersistedBucketList::CreateList(cColdBuckets, cColdEntries, cMaxColdChain));
++ memset(GetPersistedColdEntries(), 0, cColdEntries * sizeof(PersistedEntry)); // NGen determinism
+ }
+
+ //
+@@ -871,7 +871,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ DWORD dwNextId = 0; // This represents the index of the next entry to start a bucket chain
+ for (i = 0; i < cHotBuckets; i++)
+ {
+- m_sHotEntries.m_pBuckets->SetBucket(i, dwNextId, pHotBucketSizes[i]);
++ m_sHotEntries.m_pBuckets.GetValue()->SetBucket(i, dwNextId, pHotBucketSizes[i]);
+ dwNextId += pHotBucketSizes[i];
+ }
+ _ASSERTE(dwNextId == m_sHotEntries.m_cEntries);
+@@ -879,7 +879,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ dwNextId = 0; // Reset index for the cold entries (remember they have their own table of entries)
+ for (i = 0; i < cColdBuckets; i++)
+ {
+- m_sColdEntries.m_pBuckets->SetBucket(i, dwNextId, pColdBucketSizes[i]);
++ m_sColdEntries.m_pBuckets.GetValue()->SetBucket(i, dwNextId, pColdBucketSizes[i]);
+ dwNextId += pColdBucketSizes[i];
+ }
+ _ASSERTE(dwNextId == m_sColdEntries.m_cEntries);
+@@ -897,15 +897,16 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ typename EntryMappingTable::Entry *pMapEntry = &sEntryMap.m_pEntries[i];
+
+ // Entry block depends on whether this entry is hot or cold.
+- PersistedEntries *pEntries = pMapEntry->m_fHot ? &m_sHotEntries : &m_sColdEntries;
++ APTR_PersistedEntry pPersistedEntries = pMapEntry->m_fHot ? GetPersistedHotEntries() : GetPersistedColdEntries();
++ PTR_PersistedBucketList pPersistedBucketsList = pMapEntry->m_fHot ? GetPersistedHotBuckets() : GetPersistedColdBuckets();
+
+ // We already know the new bucket this entry will go into. Retrieve the index of the first entry in
+ // that bucket chain.
+- DWORD dwBaseChainIndex = pEntries->m_pBuckets->GetInitialEntry(pMapEntry->m_dwNewBucket);
++ DWORD dwBaseChainIndex = pPersistedBucketsList->GetInitialEntry(pMapEntry->m_dwNewBucket);
+
+ // This entry will be located at some offset from the index above (we calculated this ordinal in phase
+ // 2).
+- PersistedEntry *pNewEntry = &pEntries->m_pEntries[dwBaseChainIndex + pMapEntry->m_dwChainOrdinal];
++ PersistedEntry *pNewEntry = &pPersistedEntries[dwBaseChainIndex + pMapEntry->m_dwChainOrdinal];
+
+ // Record the address of the embedded sub-class hash entry in the map entry (sub-classes will use this
+ // info to map old entry addresses to their new locations).
+@@ -931,7 +932,11 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+
+ bool fAllEntriesImmutable = true;
+ for (i = 0; i < sEntryMap.m_cEntries; i++)
+- if (!DOWNCALL(SaveEntry)(pImage, pProfileData, sEntryMap.m_pEntries[i].m_pOldEntry, sEntryMap.m_pEntries[i].m_pNewEntry, &sEntryMap))
++ if (!DOWNCALL(SaveEntry)(pImage,
++ pProfileData,
++ sEntryMap.m_pEntries[i].m_pOldEntry,
++ sEntryMap.m_pEntries[i].m_pNewEntry,
++ &sEntryMap))
+ fAllEntriesImmutable = false;
+
+ // We're mostly done. Now just some cleanup and the actual DataImage storage operations.
+@@ -943,24 +948,24 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ // If there are any hot entries store the entry array and bucket list.
+ if (cHotEntries)
+ {
+- pImage->StoreStructure(m_sHotEntries.m_pEntries,
++ pImage->StoreStructure(GetPersistedHotEntries(),
+ static_cast<ULONG>(sizeof(PersistedEntry) * cHotEntries),
+ fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_HOT : DataImage::ITEM_NGEN_HASH_ENTRIES_HOT);
+
+- pImage->StoreStructure(m_sHotEntries.m_pBuckets,
+- static_cast<ULONG>(m_sHotEntries.m_pBuckets->GetSize(m_sHotEntries.m_cBuckets)),
++ pImage->StoreStructure(GetPersistedHotBuckets(),
++ static_cast<ULONG>(m_sHotEntries.m_pBuckets.GetValue()->GetSize(m_sHotEntries.m_cBuckets)),
+ DataImage::ITEM_NGEN_HASH_BUCKETLIST_HOT);
+ }
+
+ // If there are any cold entries store the entry array and bucket list.
+ if (cColdEntries)
+ {
+- pImage->StoreStructure(m_sColdEntries.m_pEntries,
++ pImage->StoreStructure(GetPersistedColdEntries(),
+ static_cast<ULONG>(sizeof(PersistedEntry) * cColdEntries),
+ fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_COLD : DataImage::ITEM_NGEN_HASH_ENTRIES_COLD);
+
+- pImage->StoreStructure(m_sColdEntries.m_pBuckets,
+- static_cast<ULONG>(m_sColdEntries.m_pBuckets->GetSize(m_sColdEntries.m_cBuckets)),
++ pImage->StoreStructure(GetPersistedColdBuckets(),
++ static_cast<ULONG>(GetPersistedColdBuckets()->GetSize(m_sColdEntries.m_cBuckets)),
+ DataImage::ITEM_NGEN_HASH_BUCKETLIST_COLD);
+ }
+
+@@ -987,7 +992,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ DWORD cNewWarmBuckets = min(m_cInitialBuckets, 11);
+
+ // Create the ngen version of the warm buckets.
+- pImage->StoreStructure(m_pWarmBuckets,
++ pImage->StoreStructure(GetWarmBuckets(),
+ cNewWarmBuckets * sizeof(VolatileEntry*),
+ DataImage::ITEM_NGEN_HASH_HOT);
+
+@@ -997,7 +1002,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ pNewTable->m_cWarmBuckets = cNewWarmBuckets;
+
+ // Zero-out the ngen version of the warm buckets.
+- VolatileEntry *pNewBuckets = (VolatileEntry*)pImage->GetImagePointer(m_pWarmBuckets);
++ VolatileEntry *pNewBuckets = (VolatileEntry*)pImage->GetImagePointer(GetWarmBuckets());
+ memset(pNewBuckets, 0, cNewWarmBuckets * sizeof(VolatileEntry*));
+ }
+
+@@ -1011,7 +1016,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseFixup(DataImage *pImage)
+ DWORD i;
+
+ // Fixup the module pointer.
+- pImage->FixupPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pModule));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pModule));
+
+ // Throw away the heap pointer, we can't serialize it into the image. We'll rely on the loader heap
+ // associated with the module above at runtime.
+@@ -1023,29 +1028,27 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseFixup(DataImage *pImage)
+ // be relative to the base of this array.
+
+ for (i = 0; i < m_sHotEntries.m_cEntries; i++)
+- DOWNCALL(FixupEntry)(pImage, &m_sHotEntries.m_pEntries[i].m_sValue, m_sHotEntries.m_pEntries, i * sizeof(PersistedEntry));
++ DOWNCALL(FixupEntry)(pImage,
++ &(GetPersistedHotEntries())[i].m_sValue,
++ GetPersistedHotEntries(),
++ i * sizeof(PersistedEntry));
+
+ for (i = 0; i < m_sColdEntries.m_cEntries; i++)
+- DOWNCALL(FixupEntry)(pImage, &m_sColdEntries.m_pEntries[i].m_sValue, m_sColdEntries.m_pEntries, i * sizeof(PersistedEntry));
++ DOWNCALL(FixupEntry)(pImage,
++ &(GetPersistedColdEntries())[i].m_sValue,
++ GetPersistedColdEntries(),
++ i * sizeof(PersistedEntry));
+
+ // Fixup the warm (empty) bucket list.
+- pImage->FixupPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pWarmBuckets));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pWarmBuckets));
+
+ // Fixup the hot entry array and bucket list.
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) +
+- offsetof(PersistedEntries, m_pEntries));
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) +
+- offsetof(PersistedEntries, m_pBuckets));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) + offsetof(PersistedEntries, m_pEntries));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) + offsetof(PersistedEntries, m_pBuckets));
+
+ // Fixup the cold entry array and bucket list.
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) +
+- offsetof(PersistedEntries, m_pEntries));
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) +
+- offsetof(PersistedEntries, m_pBuckets));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) + offsetof(PersistedEntries, m_pEntries));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) + offsetof(PersistedEntries, m_pBuckets));
+ }
+ #endif // !DACCESS_COMPILE
+ #endif // FEATURE_PREJIT
+@@ -1064,14 +1067,14 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseEnumMemoryRegions(CLRDataEnumMemoryFlags
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(FINAL_CLASS));
+
+ // Save the warm bucket list.
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_pWarmBuckets), m_cWarmBuckets * sizeof(VolatileEntry*));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetWarmBuckets()), m_cWarmBuckets * sizeof(VolatileEntry*));
+
+ // Save all the warm entries.
+- if (m_pWarmBuckets.IsValid())
++ if (GetWarmBuckets().IsValid())
+ {
+ for (DWORD i = 0; i < m_cWarmBuckets; i++)
+ {
+- PTR_VolatileEntry pEntry = m_pWarmBuckets[i];
++ PTR_VolatileEntry pEntry = (GetWarmBuckets())[i];
+ while (pEntry.IsValid())
+ {
+ pEntry.EnumMem();
+@@ -1088,25 +1091,35 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseEnumMemoryRegions(CLRDataEnumMemoryFlags
+ // Save hot buckets and entries.
+ if (m_sHotEntries.m_cEntries > 0)
+ {
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sHotEntries.m_pEntries), m_sHotEntries.m_cEntries * sizeof(PersistedEntry));
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sHotEntries.m_pBuckets), m_sHotEntries.m_pBuckets->GetSize(m_sHotEntries.m_cBuckets));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedHotEntries()),
++ m_sHotEntries.m_cEntries * sizeof(PersistedEntry));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedHotBuckets()),
++ GetPersistedHotBuckets()->GetSize(m_sHotEntries.m_cBuckets));
+ for (DWORD i = 0; i < m_sHotEntries.m_cEntries; i++)
+- DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(&m_sHotEntries.m_pEntries[i])), flags);
++ {
++ PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedHotEntries())[i]);
++ DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(pEntry), flags);
++ }
+ }
+
+ // Save cold buckets and entries.
+ if (m_sColdEntries.m_cEntries > 0)
+ {
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sColdEntries.m_pEntries), m_sColdEntries.m_cEntries * sizeof(PersistedEntry));
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sColdEntries.m_pBuckets), m_sColdEntries.m_pBuckets->GetSize(m_sColdEntries.m_cBuckets));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedColdEntries()),
++ m_sColdEntries.m_cEntries * sizeof(PersistedEntry));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedColdBuckets()),
++ GetPersistedColdBuckets()->GetSize(m_sColdEntries.m_cBuckets));
+ for (DWORD i = 0; i < m_sColdEntries.m_cEntries; i++)
+- DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(&m_sColdEntries.m_pEntries[i])), flags);
++ {
++ PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedColdEntries())[i]);
++ DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(pEntry), flags);
++ }
+ }
+ #endif // FEATURE_PREJIT
+
+ // Save the module if present.
+- if (m_pModule.IsValid())
+- m_pModule->EnumMemoryRegions(flags, true);
++ if (GetModule().IsValid())
++ GetModule()->EnumMemoryRegions(flags, true);
+ }
+ #endif // DACCESS_COMPILE
+
+@@ -1136,13 +1149,31 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindPersistedEntryByHash(PersistedEnt
+ // Since there is at least one entry there must be at least one bucket.
+ _ASSERTE(pEntries->m_cBuckets > 0);
+
++ DWORD eType = (pEntries == &m_sHotEntries ? Hot : Cold);
++
+ // Get the first entry and count of entries for the bucket which contains all entries with the given hash
+ // code.
+ DWORD dwEntryIndex, cEntriesLeft;
+- pEntries->m_pBuckets->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft);
++ if (eType == Hot)
++ {
++ GetPersistedHotBuckets()->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft);
++ }
++ else
++ {
++ GetPersistedColdBuckets()->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft);
++ }
+
+ // Determine the address of the first entry in the chain by indexing into the entry array.
+- PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&pEntries->m_pEntries[dwEntryIndex]);
++ PTR_PersistedEntry pEntry;
++
++ if (eType == Hot)
++ {
++ pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedHotEntries())[dwEntryIndex]);
++ }
++ else
++ {
++ pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedColdEntries())[dwEntryIndex]);
++ }
+
+ // Iterate while we've still got entries left to check in this chain.
+ while (cEntriesLeft--)
+@@ -1154,7 +1185,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindPersistedEntryByHash(PersistedEnt
+ // Record our current search state into the provided context so that a subsequent call to
+ // BaseFindNextEntryByHash can pick up the search where it left off.
+ pContext->m_pEntry = dac_cast<TADDR>(pEntry);
+- pContext->m_eType = pEntries == &m_sHotEntries ? Hot : Cold;
++ pContext->m_eType = eType;
+ pContext->m_cRemainingEntries = cEntriesLeft;
+
+ // Return the address of the sub-classes' embedded entry structure.
+@@ -1223,7 +1254,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindVolatileEntryByHash(NgenHashValue
+ _ASSERTE(m_cWarmBuckets > 0);
+
+ // Point at the first entry in the bucket chain which would contain any entries with the given hash code.
+- PTR_VolatileEntry pEntry = m_pWarmBuckets[iHash % m_cWarmBuckets];
++ PTR_VolatileEntry pEntry = (GetWarmBuckets())[iHash % m_cWarmBuckets];
+
+ // Walk the bucket chain one entry at a time.
+ while (pEntry)
+@@ -1257,7 +1288,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseInitIterator(BaseIterator *pIterator)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+- pIterator->m_pTable = this;
++ pIterator->m_pTable = dac_cast<DPTR(NgenHashTable<NGEN_HASH_ARGS>)>(this);
+ pIterator->m_pEntry = NULL;
+ #ifdef FEATURE_PREJIT
+ pIterator->m_eType = Hot;
+@@ -1299,7 +1330,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next()
+ if (m_pEntry == NULL)
+ {
+ // This is our first lookup in the hot section, return the first entry in the hot array.
+- m_pEntry = dac_cast<TADDR>(m_pTable->m_sHotEntries.m_pEntries);
++ m_pEntry = dac_cast<TADDR>(m_pTable->GetPersistedHotEntries());
+ }
+ else
+ {
+@@ -1329,7 +1360,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next()
+ {
+ // This is our first lookup in the warm section for a particular bucket, return the first
+ // entry in that bucket.
+- m_pEntry = dac_cast<TADDR>(m_pTable->m_pWarmBuckets[m_dwBucket]);
++ m_pEntry = dac_cast<TADDR>((m_pTable->GetWarmBuckets())[m_dwBucket]);
+ }
+ else
+ {
+@@ -1370,7 +1401,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next()
+ if (m_pEntry == NULL)
+ {
+ // This is our first lookup in the cold section, return the first entry in the cold array.
+- m_pEntry = dac_cast<TADDR>(m_pTable->m_sColdEntries.m_pEntries);
++ m_pEntry = dac_cast<TADDR>(m_pTable->GetPersistedColdEntries());
+ }
+ else
+ {
+@@ -1463,17 +1494,17 @@ void NgenHashEntryRef<NGEN_HASH_ARGS>::Fixup(DataImage *pImage, NgenHashTable<NG
+ BYTE *pLocationBase;
+ DWORD cbLocationOffset;
+
+- if (pLocation >= (BYTE*)pTable->m_sHotEntries.m_pEntries &&
+- pLocation < (BYTE*)(pTable->m_sHotEntries.m_pEntries + pTable->m_sHotEntries.m_cEntries))
++ if (pLocation >= (BYTE*)pTable->GetPersistedHotEntries() &&
++ pLocation < (BYTE*)(pTable->GetPersistedHotEntries() + pTable->m_sHotEntries.m_cEntries))
+ {
+ // The field is in a hot entry.
+- pLocationBase = (BYTE*)pTable->m_sHotEntries.m_pEntries;
++ pLocationBase = (BYTE*)pTable->GetPersistedHotEntries();
+ }
+- else if (pLocation >= (BYTE*)pTable->m_sColdEntries.m_pEntries &&
+- pLocation < (BYTE*)(pTable->m_sColdEntries.m_pEntries + pTable->m_sColdEntries.m_cEntries))
++ else if (pLocation >= (BYTE*)pTable->GetPersistedColdEntries() &&
++ pLocation < (BYTE*)(pTable->GetPersistedColdEntries() + pTable->m_sColdEntries.m_cEntries))
+ {
+ // The field is in a cold entry.
+- pLocationBase = (BYTE*)pTable->m_sColdEntries.m_pEntries;
++ pLocationBase = (BYTE*)pTable->GetPersistedColdEntries();
+ }
+ else
+ {
+@@ -1490,17 +1521,17 @@ void NgenHashEntryRef<NGEN_HASH_ARGS>::Fixup(DataImage *pImage, NgenHashTable<NG
+ BYTE *pTargetBase;
+ DWORD cbTargetOffset;
+
+- if (pTarget >= (BYTE*)pTable->m_sHotEntries.m_pEntries &&
+- pTarget < (BYTE*)(pTable->m_sHotEntries.m_pEntries + pTable->m_sHotEntries.m_cEntries))
++ if (pTarget >= (BYTE*)pTable->GetPersistedHotEntries() &&
++ pTarget < (BYTE*)(pTable->GetPersistedHotEntries() + pTable->m_sHotEntries.m_cEntries))
+ {
+ // The target is a hot entry.
+- pTargetBase = (BYTE*)pTable->m_sHotEntries.m_pEntries;
++ pTargetBase = (BYTE*)pTable->GetPersistedHotEntries();
+ }
+- else if (pTarget >= (BYTE*)pTable->m_sColdEntries.m_pEntries &&
+- pTarget < (BYTE*)(pTable->m_sColdEntries.m_pEntries + pTable->m_sColdEntries.m_cEntries))
++ else if (pTarget >= (BYTE*)pTable->GetPersistedColdEntries() &&
++ pTarget < (BYTE*)(pTable->GetPersistedColdEntries() + pTable->m_sColdEntries.m_cEntries))
+ {
+ // The target is a cold entry.
+- pTargetBase = (BYTE*)pTable->m_sColdEntries.m_pEntries;
++ pTargetBase = (BYTE*)pTable->GetPersistedColdEntries();
+ }
+ else
+ {
+diff --git a/src/vm/stubhelpers.cpp b/src/vm/stubhelpers.cpp
+index db593c6..36c6d43 100644
+--- a/src/vm/stubhelpers.cpp
++++ b/src/vm/stubhelpers.cpp
+@@ -96,7 +96,7 @@ MethodDesc *StubHelpers::ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *p
+ MethodTable *pMT = pThisUNSAFE->GetMethodTable();
+
+ _ASSERTE(pMT->IsDelegate());
+- return ((DelegateEEClass *)pMT->GetClass())->m_pInvokeMethod;
++ return ((DelegateEEClass *)pMT->GetClass())->GetInvokeMethod();
+ }
+ return pMD;
+ }
+@@ -1551,7 +1551,7 @@ FCIMPL3(SIZE_T, StubHelpers::ProfilerBeginTransitionCallback, SIZE_T pSecretPara
+ _ASSERTE(pMT->IsDelegate());
+
+ EEClass * pClass = pMT->GetClass();
+- pRealMD = ((DelegateEEClass*)pClass)->m_pInvokeMethod;
++ pRealMD = ((DelegateEEClass*)pClass)->GetInvokeMethod();
+ _ASSERTE(pRealMD);
+ }
+ }
+diff --git a/src/vm/typedesc.cpp b/src/vm/typedesc.cpp
+index 06170cb..6718068 100644
+--- a/src/vm/typedesc.cpp
++++ b/src/vm/typedesc.cpp
+@@ -1373,7 +1373,7 @@ void TypeVarTypeDesc::Fixup(DataImage *image)
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_ZAP, LL_INFO10000, " TypeVarTypeDesc::Fixup %x (%p)\n", GetToken(), this));
+- image->FixupPointerField(this, offsetof(TypeVarTypeDesc, m_pModule));
++ image->FixupRelativePointerField(this, offsetof(TypeVarTypeDesc, m_pModule));
+ image->ZeroField(this, offsetof(TypeVarTypeDesc, m_hExposedClassObject), sizeof(m_hExposedClassObject));
+
+ // We don't persist the constraints: instead, load them back on demand
+@@ -1394,10 +1394,10 @@ MethodDesc * TypeVarTypeDesc::LoadOwnerMethod()
+ }
+ CONTRACTL_END;
+
+- MethodDesc *pMD = m_pModule->LookupMethodDef(m_typeOrMethodDef);
++ MethodDesc *pMD = GetModule()->LookupMethodDef(m_typeOrMethodDef);
+ if (pMD == NULL)
+ {
+- pMD = MemberLoader::GetMethodDescFromMethodDef(m_pModule, m_typeOrMethodDef, FALSE);
++ pMD = MemberLoader::GetMethodDescFromMethodDef(GetModule(), m_typeOrMethodDef, FALSE);
+ }
+ return pMD;
+ }
+@@ -1414,10 +1414,10 @@ TypeHandle TypeVarTypeDesc::LoadOwnerType()
+ }
+ CONTRACTL_END;
+
+- TypeHandle genericType = m_pModule->LookupTypeDef(m_typeOrMethodDef);
++ TypeHandle genericType = GetModule()->LookupTypeDef(m_typeOrMethodDef);
+ if (genericType.IsNull())
+ {
+- genericType = ClassLoader::LoadTypeDefThrowing(m_pModule, m_typeOrMethodDef,
++ genericType = ClassLoader::LoadTypeDefThrowing(GetModule(), m_typeOrMethodDef,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+@@ -1506,7 +1506,7 @@ void TypeVarTypeDesc::LoadConstraints(ClassLoadLevel level /* = CLASS_LOADED */)
+ numConstraints = pInternalImport->EnumGetCount(&hEnum);
+ if (numConstraints != 0)
+ {
+- LoaderAllocator* pAllocator=m_pModule->GetLoaderAllocator();
++ LoaderAllocator* pAllocator = GetModule()->GetLoaderAllocator();
+ // If there is a single class constraint we put in in element 0 of the array
+ AllocMemHolder<TypeHandle> constraints
+ (pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(numConstraints) * S_SIZE_T(sizeof(TypeHandle))));
+@@ -2434,9 +2434,11 @@ TypeVarTypeDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+- if (m_pModule.IsValid())
++ PTR_TypeVarTypeDesc ptrThis(this);
++
++ if (GetModule().IsValid())
+ {
+- m_pModule->EnumMemoryRegions(flags, true);
++ GetModule()->EnumMemoryRegions(flags, true);
+ }
+
+ if (m_numConstraints != (DWORD)-1)
+diff --git a/src/vm/typedesc.h b/src/vm/typedesc.h
+index 4bc4978..a8b1c25 100644
+--- a/src/vm/typedesc.h
++++ b/src/vm/typedesc.h
+@@ -462,7 +462,7 @@ public:
+ }
+ CONTRACTL_END;
+
+- m_pModule = pModule;
++ m_pModule.SetValue(pModule);
+ m_typeOrMethodDef = typeOrMethodDef;
+ m_token = token;
+ m_index = index;
+@@ -479,7 +479,8 @@ public:
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+- return m_pModule;
++
++ return ReadPointer(this, &TypeVarTypeDesc::m_pModule);
+ }
+
+ unsigned int GetIndex()
+@@ -567,7 +568,7 @@ protected:
+ BOOL ConstrainedAsObjRefHelper();
+
+ // Module containing the generic definition, also the loader module for this type desc
+- PTR_Module m_pModule;
++ RelativePointer<PTR_Module> m_pModule;
+
+ // Declaring type or method
+ mdToken m_typeOrMethodDef;
+diff --git a/src/vm/typehash.cpp b/src/vm/typehash.cpp
+index b3de777..0d53a15 100644
+--- a/src/vm/typehash.cpp
++++ b/src/vm/typehash.cpp
+@@ -67,8 +67,8 @@ LoaderAllocator *EETypeHashTable::GetLoaderAllocator()
+ }
+ else
+ {
+- _ASSERTE(m_pModule != NULL);
+- return m_pModule->GetLoaderAllocator();
++ _ASSERTE(!m_pModule.IsNull());
++ return GetModule()->GetLoaderAllocator();
+ }
+ }
+
+@@ -417,7 +417,7 @@ EETypeHashEntry_t *EETypeHashTable::FindItem(TypeKey* pKey)
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ Module *pDefiningModule;
+- PCCOR_SIGNATURE pSig = m_pModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
++ PCCOR_SIGNATURE pSig = GetModule()->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+ if (pDefiningModule == NULL)
+ break;
+
+@@ -487,7 +487,8 @@ BOOL EETypeHashTable::CompareInstantiatedType(TypeHandle t, Module *pModule, mdT
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ Module *pDefiningModule;
+- PCCOR_SIGNATURE pSig = m_pModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
++
++ PCCOR_SIGNATURE pSig = GetModule()->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+
+ // First check that the modules for the generic type defs match
+ if (dac_cast<TADDR>(pDefiningModule) !=
+@@ -536,7 +537,7 @@ BOOL EETypeHashTable::CompareInstantiatedType(TypeHandle t, Module *pModule, mdT
+ DACCOP_IGNORE(CastOfMarshalledType, "Dual mode DAC problem, but since the size is the same, the cast is safe");
+ TADDR candidateArg = ((FixupPointer<TADDR> *)candidateInst.GetRawArgs())[i].GetValue();
+
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, inst[i]))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), candidateArg, inst[i]))
+ {
+ return FALSE;
+ }
+@@ -578,7 +579,7 @@ BOOL EETypeHashTable::CompareFnPtrType(TypeHandle t, BYTE callConv, DWORD numArg
+ for (DWORD i = 0; i <= numArgs; i++)
+ {
+ TADDR candidateArg = retAndArgTypes2[i].AsTAddr();
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, retAndArgTypes[i]))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), candidateArg, retAndArgTypes[i]))
+ {
+ return FALSE;
+ }
+@@ -647,7 +648,7 @@ VOID EETypeHashTable::InsertValue(TypeHandle data)
+ PRECONDITION(!data.IsEncodedFixup());
+ PRECONDITION(!data.IsGenericTypeDefinition()); // Generic type defs live in typedef table (availableClasses)
+ PRECONDITION(data.HasInstantiation() || data.HasTypeParam() || data.IsFnPtrType()); // It's an instantiated type or an array/ptr/byref type
+- PRECONDITION(!m_pModule || m_pModule->IsTenured()); // Destruct won't destruct m_pAvailableParamTypes for non-tenured modules - so make sure no one tries to insert one before the Module has been tenured
++ PRECONDITION(m_pModule.IsNull() || GetModule()->IsTenured()); // Destruct won't destruct m_pAvailableParamTypes for non-tenured modules - so make sure no one tries to insert one before the Module has been tenured
+ }
+ CONTRACTL_END
+
+@@ -673,7 +674,7 @@ void EETypeHashTable::Save(DataImage *image, Module *module, CorProfileData *pro
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+- PRECONDITION(image->GetModule() == m_pModule);
++ PRECONDITION(image->GetModule() == GetModule());
+ }
+ CONTRACTL_END;
+
+@@ -715,7 +716,7 @@ void EETypeHashTable::Save(DataImage *image, Module *module, CorProfileData *pro
+ {
+ if (flags & (1<<ReadTypeHashTable))
+ {
+- TypeHandle th = m_pModule->LoadIBCTypeHelper(pBlobSigEntry);
++ TypeHandle th = GetModule()->LoadIBCTypeHelper(pBlobSigEntry);
+ #if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_8);
+ #endif
+@@ -798,14 +799,14 @@ void EETypeHashTable::FixupEntry(DataImage *pImage, EETypeHashEntry_t *pEntry, v
+ if (pType.IsTypeDesc())
+ {
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(EETypeHashEntry_t, m_data),
+- pType.AsTypeDesc(), 2);
++ pType.AsTypeDesc(), 2, IMAGE_REL_BASED_RelativePointer);
+
+ pType.AsTypeDesc()->Fixup(pImage);
+ }
+ else
+ {
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(EETypeHashEntry_t, m_data),
+- pType.AsMethodTable());
++ pType.AsMethodTable(), 0, IMAGE_REL_BASED_RelativePointer);
+
+ pType.AsMethodTable()->Fixup(pImage);
+ }
+@@ -838,17 +839,20 @@ TypeHandle EETypeHashEntry::GetTypeHandle()
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Remove any hot entry indicator bit that may have been set as the result of Ngen saving.
+- return TypeHandle::FromTAddr(m_data & ~0x1);
++ TADDR data = dac_cast<TADDR>(GetData());
++ return TypeHandle::FromTAddr(data & ~0x1);
+ }
+
++#ifndef DACCESS_COMPILE
+ void EETypeHashEntry::SetTypeHandle(TypeHandle handle)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // We plan to steal the low-order bit of the handle for ngen purposes.
+ _ASSERTE((handle.AsTAddr() & 0x1) == 0);
+- m_data = handle.AsTAddr();
++ m_data.SetValueMaybeNull(handle.AsPtr());
+ }
++#endif // !DACCESS_COMPILE
+
+ #ifdef FEATURE_PREJIT
+ bool EETypeHashEntry::IsHot()
+@@ -856,16 +860,21 @@ bool EETypeHashEntry::IsHot()
+ LIMITED_METHOD_CONTRACT;
+
+ // Low order bit of data field indicates a hot entry.
+- return (m_data & 1) != 0;
++ TADDR data = dac_cast<TADDR>(GetData());
++ return (data & 1) != 0;
+ }
+
++#ifndef DACCESS_COMPILE
+ void EETypeHashEntry::MarkAsHot()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Low order bit of data field indicates a hot entry.
+- m_data |= 0x1;
++ TADDR data = dac_cast<TADDR>(GetData());
++ data |= 0x1;
++ m_data.SetValueMaybeNull(dac_cast<PTR_VOID>(data));
+ }
++#endif // !DACCESS_COMPILE
+ #endif // FEATURE_PREJIT
+
+ #ifdef _MSC_VER
+diff --git a/src/vm/typehash.h b/src/vm/typehash.h
+index ce1f90b..c9b01d5 100644
+--- a/src/vm/typehash.h
++++ b/src/vm/typehash.h
+@@ -42,13 +42,27 @@ typedef struct EETypeHashEntry
+ void MarkAsHot();
+ #endif // FEATURE_PREJIT
+
++#ifndef DACCESS_COMPILE
++ EETypeHashEntry& operator=(const EETypeHashEntry& src)
++ {
++ m_data.SetValueMaybeNull(src.m_data.GetValueMaybeNull());
++
++ return *this;
++ }
++#endif // !DACCESS_COMPILE
++
++ PTR_VOID GetData()
++ {
++ return ReadPointerMaybeNull(this, &EETypeHashEntry::m_data);
++ }
++
+ private:
+ friend class EETypeHashTable;
+ #ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+ #endif
+
+- TADDR m_data;
++ RelativePointer<PTR_VOID> m_data;
+ } EETypeHashEntry_t;
+
+
+--
+2.7.4
+
diff --git a/packaging/0011-FIX-fix-No.2-incorrect-m_pBeginInvokeMethod.patch b/packaging/0011-FIX-fix-No.2-incorrect-m_pBeginInvokeMethod.patch
new file mode 100644
index 0000000000..4aae433b92
--- /dev/null
+++ b/packaging/0011-FIX-fix-No.2-incorrect-m_pBeginInvokeMethod.patch
@@ -0,0 +1,25 @@
+From 39faf6a5a0c6217aaed0dbccec40f5b43eb10529 Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Wed, 11 Apr 2018 16:34:59 +0300
+Subject: [PATCH 11/32] FIX: fix No.2, incorrect m_pBeginInvokeMethod
+
+---
+ src/vm/methodtablebuilder.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index 05ab438..a1e9095 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -5037,7 +5037,7 @@ void MethodTableBuilder::SetSecurityFlagsOnMethod(bmtRTMethod* pParentMethod,
+ if(!pNewMD->RequiresLinktimeCheck() && IsDelegate())
+ {
+ DelegateEEClass* pDelegateClass = (DelegateEEClass*)GetHalfBakedClass();
+- if(pNewMD == pDelegateClass->m_pBeginInvokeMethod)
++ if(pNewMD == pDelegateClass->m_pBeginInvokeMethod.GetValueMaybeNull())
+ {
+ pNewMD->SetRequiresLinktimeCheck();
+ pNewMD->SetRequiresLinkTimeCheckHostProtectionOnly(); // this link check is due to HP only
+--
+2.7.4
+
diff --git a/packaging/0012-Replace-array-type-handle-with-method-table-in-argum.patch b/packaging/0012-Replace-array-type-handle-with-method-table-in-argum.patch
new file mode 100644
index 0000000000..348709c38e
--- /dev/null
+++ b/packaging/0012-Replace-array-type-handle-with-method-table-in-argum.patch
@@ -0,0 +1,1214 @@
+From a0dc56413e9e5c61c0f6617e1ef091966b2f7a1d Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Tue, 27 Jun 2017 00:18:19 +0300
+Subject: [PATCH 12/32] Replace array type handle with method table in
+ arguments of array allocation helpers (#12369)
+
+* Remove direct usage of type handle in JIT_NewArr1, with except of retrieving template method table.
+
+* Assert that array type descriptor is loaded when array object's method table is set.
+
+* Pass template method tables instead of array type descriptors to array allocation helpers.
+---
+ src/vm/amd64/JitHelpers_InlineGetThread.asm | 39 ++++------------
+ src/vm/amd64/JitHelpers_Slow.asm | 69 ++++++++---------------------
+ src/vm/amd64/asmconstants.h | 4 --
+ src/vm/arm/asmconstants.h | 3 --
+ src/vm/arm/patchedcode.asm | 42 ++++--------------
+ src/vm/arm/stubs.cpp | 4 +-
+ src/vm/compile.cpp | 8 ++++
+ src/vm/gchelpers.cpp | 50 ++++++++++++++-------
+ src/vm/gchelpers.h | 8 ++--
+ src/vm/i386/jitinterfacex86.cpp | 24 +++-------
+ src/vm/interpreter.cpp | 7 ++-
+ src/vm/jithelpers.cpp | 52 +++++++++++-----------
+ src/vm/jitinterface.cpp | 12 ++++-
+ src/vm/jitinterface.h | 10 ++---
+ src/vm/jitinterfacegen.cpp | 12 ++---
+ src/vm/methodtable.cpp | 13 ++++++
+ src/vm/object.cpp | 18 ++++++++
+ src/vm/object.h | 50 ++++++++++++++++++---
+ src/vm/object.inl | 27 +++++++++++
+ src/vm/prestub.cpp | 4 +-
+ 20 files changed, 245 insertions(+), 211 deletions(-)
+
+diff --git a/src/vm/amd64/JitHelpers_InlineGetThread.asm b/src/vm/amd64/JitHelpers_InlineGetThread.asm
+index 700c3b3..022ec67 100644
+--- a/src/vm/amd64/JitHelpers_InlineGetThread.asm
++++ b/src/vm/amd64/JitHelpers_InlineGetThread.asm
+@@ -147,15 +147,6 @@ align 16
+ jmp JIT_Box
+ NESTED_END JIT_BoxFastMP_InlineGetThread, _TEXT
+
+-FIX_INDIRECTION macro Reg
+-ifdef FEATURE_PREJIT
+- test Reg, 1
+- jz @F
+- mov Reg, [Reg-1]
+- @@:
+-endif
+-endm
+-
+ LEAF_ENTRY AllocateStringFastMP_InlineGetThread, _TEXT
+ ; We were passed the number of characters in ECX
+
+@@ -203,10 +194,9 @@ endif ; _DEBUG
+ jmp FramedAllocateString
+ LEAF_END AllocateStringFastMP_InlineGetThread, _TEXT
+
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1VC_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ LEAF_ENTRY JIT_NewArr1VC_MP_InlineGetThread, _TEXT
+- ; We were passed a type descriptor in RCX, which contains the (shared)
+- ; array method table and the element type.
++ ; We were passed a (shared) method table in RCX, which contains the element type.
+
+ ; The element count is in RDX
+
+@@ -223,17 +213,12 @@ LEAF_ENTRY JIT_NewArr1VC_MP_InlineGetThread, _TEXT
+
+ ; In both cases we do a final overflow check after adding to the alloc_ptr.
+
+- ; we need to load the true method table from the type desc
+- mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r9
+-
+ cmp rdx, (65535 - 256)
+ jae OversizedArray
+
+- movzx r8d, word ptr [r9 + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
++ movzx r8d, word ptr [rcx + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
+ imul r8d, edx
+- add r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
++ add r8d, dword ptr [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; round the size to a multiple of 8
+
+@@ -252,7 +237,7 @@ LEAF_ENTRY JIT_NewArr1VC_MP_InlineGetThread, _TEXT
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+- mov [rax], r9
++ mov [rax], rcx
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+@@ -268,10 +253,9 @@ endif ; _DEBUG
+ LEAF_END JIT_NewArr1VC_MP_InlineGetThread, _TEXT
+
+
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1OBJ_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ LEAF_ENTRY JIT_NewArr1OBJ_MP_InlineGetThread, _TEXT
+- ; We were passed a type descriptor in RCX, which contains the (shared)
+- ; array method table and the element type.
++ ; We were passed a (shared) method table in RCX, which contains the element type.
+
+ ; The element count is in RDX
+
+@@ -285,16 +269,11 @@ LEAF_ENTRY JIT_NewArr1OBJ_MP_InlineGetThread, _TEXT
+ cmp rdx, (ASM_LARGE_OBJECT_SIZE - 256)/8 ; sizeof(void*)
+ jae OversizedArray
+
+- ; we need to load the true method table from the type desc
+- mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r9
+-
+ ; In this case we know the element size is sizeof(void *), or 8 for x64
+ ; This helps us in two ways - we can shift instead of multiplying, and
+ ; there's no need to align the size either
+
+- mov r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
++ mov r8d, dword ptr [rcx + OFFSET__MethodTable__m_BaseSize]
+ lea r8d, [r8d + edx * 8]
+
+ ; No need for rounding in this case - element size is 8, and m_BaseSize is guaranteed
+@@ -310,7 +289,7 @@ LEAF_ENTRY JIT_NewArr1OBJ_MP_InlineGetThread, _TEXT
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+- mov [rax], r9
++ mov [rax], rcx
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+diff --git a/src/vm/amd64/JitHelpers_Slow.asm b/src/vm/amd64/JitHelpers_Slow.asm
+index 293e447..448bcb2 100644
+--- a/src/vm/amd64/JitHelpers_Slow.asm
++++ b/src/vm/amd64/JitHelpers_Slow.asm
+@@ -321,22 +321,12 @@ endif ; _DEBUG
+ jmp FramedAllocateString
+ NESTED_END AllocateStringFastMP, _TEXT
+
+-FIX_INDIRECTION macro Reg
+-ifdef FEATURE_PREJIT
+- test Reg, 1
+- jz @F
+- mov Reg, [Reg-1]
+- @@:
+-endif
+-endm
+-
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1VC_MP, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ NESTED_ENTRY JIT_NewArr1VC_MP, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+- ; We were passed a type descriptor in RCX, which contains the (shared)
+- ; array method table and the element type.
++ ; We were passed a (shared) method table in RCX, which contains the element type.
+
+ ; The element count is in RDX
+
+@@ -356,17 +346,12 @@ NESTED_ENTRY JIT_NewArr1VC_MP, _TEXT
+ CALL_GETTHREAD
+ mov r11, rax
+
+- ; we need to load the true method table from the type desc
+- mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r9
+-
+ cmp rdx, (65535 - 256)
+ jae OversizedArray
+
+- movzx r8d, word ptr [r9 + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
++ movzx r8d, word ptr [rcx + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
+ imul r8d, edx ; signed mul, but won't overflow due to length restriction above
+- add r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
++ add r8d, dword ptr [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; round the size to a multiple of 8
+
+@@ -383,7 +368,7 @@ NESTED_ENTRY JIT_NewArr1VC_MP, _TEXT
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+- mov [rax], r9
++ mov [rax], rcx
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+@@ -401,13 +386,12 @@ endif ; _DEBUG
+ NESTED_END JIT_NewArr1VC_MP, _TEXT
+
+
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1OBJ_MP, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ NESTED_ENTRY JIT_NewArr1OBJ_MP, _TEXT
+ alloc_stack MIN_SIZE
+ END_PROLOGUE
+
+- ; We were passed a type descriptor in RCX, which contains the (shared)
+- ; array method table and the element type.
++ ; We were passed a (shared) method table in RCX, which contains the element type.
+
+ ; The element count is in RDX
+
+@@ -424,16 +408,11 @@ NESTED_ENTRY JIT_NewArr1OBJ_MP, _TEXT
+ CALL_GETTHREAD
+ mov r11, rax
+
+- ; we need to load the true method table from the type desc
+- mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r9
+-
+ ; In this case we know the element size is sizeof(void *), or 8 for x64
+ ; This helps us in two ways - we can shift instead of multiplying, and
+ ; there's no need to align the size either
+
+- mov r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
++ mov r8d, dword ptr [rcx + OFFSET__MethodTable__m_BaseSize]
+ lea r8d, [r8d + edx * 8]
+
+ ; No need for rounding in this case - element size is 8, and m_BaseSize is guaranteed
+@@ -448,7 +427,7 @@ NESTED_ENTRY JIT_NewArr1OBJ_MP, _TEXT
+ ja AllocFailed
+
+ mov [r11 + OFFSET__Thread__m_alloc_context__alloc_ptr], r8
+- mov [rax], r9
++ mov [rax], rcx
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+
+@@ -626,11 +605,10 @@ endif ; _DEBUG
+ jmp FramedAllocateString
+ LEAF_END AllocateStringFastUP, _TEXT
+
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1VC_UP, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ LEAF_ENTRY JIT_NewArr1VC_UP, _TEXT
+
+- ; We were passed a type descriptor in RCX, which contains the (shared)
+- ; array method table and the element type.
++ ; We were passed a (shared) method table in RCX, which contains the element type.
+
+ ; The element count is in RDX
+
+@@ -647,17 +625,12 @@ LEAF_ENTRY JIT_NewArr1VC_UP, _TEXT
+
+ ; In both cases we do a final overflow check after adding to the alloc_ptr.
+
+- ; we need to load the true method table from the type desc
+- mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r9
+-
+ cmp rdx, (65535 - 256)
+ jae JIT_NewArr1
+
+- movzx r8d, word ptr [r9 + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
++ movzx r8d, word ptr [rcx + OFFSETOF__MethodTable__m_dwFlags] ; component size is low 16 bits
+ imul r8d, edx ; signed mul, but won't overflow due to length restriction above
+- add r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
++ add r8d, dword ptr [rcx + OFFSET__MethodTable__m_BaseSize]
+
+ ; round the size to a multiple of 8
+
+@@ -677,7 +650,7 @@ LEAF_ENTRY JIT_NewArr1VC_UP, _TEXT
+ ja AllocFailed
+
+ mov qword ptr [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr], r8 ; update the alloc ptr
+- mov [rax], r9
++ mov [rax], rcx
+ mov [g_global_alloc_lock], -1
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+@@ -694,11 +667,10 @@ endif ; _DEBUG
+ LEAF_END JIT_NewArr1VC_UP, _TEXT
+
+
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1OBJ_UP, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ LEAF_ENTRY JIT_NewArr1OBJ_UP, _TEXT
+
+- ; We were passed a type descriptor in RCX, which contains the (shared)
+- ; array method table and the element type.
++ ; We were passed a (shared) method table in RCX, which contains the element type.
+
+ ; The element count is in RDX
+
+@@ -712,16 +684,11 @@ LEAF_ENTRY JIT_NewArr1OBJ_UP, _TEXT
+ cmp rdx, (ASM_LARGE_OBJECT_SIZE - 256)/8 ; sizeof(void*)
+ jae OversizedArray
+
+- ; we need to load the true method table from the type desc
+- mov r9, [rcx + OFFSETOF__ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r9
+-
+ ; In this case we know the element size is sizeof(void *), or 8 for x64
+ ; This helps us in two ways - we can shift instead of multiplying, and
+ ; there's no need to align the size either
+
+- mov r8d, dword ptr [r9 + OFFSET__MethodTable__m_BaseSize]
++ mov r8d, dword ptr [rcx + OFFSET__MethodTable__m_BaseSize]
+ lea r8d, [r8d + edx * 8]
+
+ ; No need for rounding in this case - element size is 8, and m_BaseSize is guaranteed
+@@ -739,7 +706,7 @@ LEAF_ENTRY JIT_NewArr1OBJ_UP, _TEXT
+ ja AllocFailed
+
+ mov qword ptr [g_global_alloc_context + OFFSETOF__gc_alloc_context__alloc_ptr], r8 ; update the alloc ptr
+- mov [rax], r9
++ mov [rax], rcx
+ mov [g_global_alloc_lock], -1
+
+ mov dword ptr [rax + OFFSETOF__ArrayBase__m_NumComponents], edx
+diff --git a/src/vm/amd64/asmconstants.h b/src/vm/amd64/asmconstants.h
+index e4f77de..4a100c1 100644
+--- a/src/vm/amd64/asmconstants.h
++++ b/src/vm/amd64/asmconstants.h
+@@ -609,10 +609,6 @@ ASMCONSTANTS_C_ASSERT(OFFSETOF__ArrayBase__m_NumComponents
+ ASMCONSTANTS_C_ASSERT(OFFSETOF__StringObject__m_StringLength
+ == offsetof(StringObject, m_StringLength));
+
+-#define OFFSETOF__ArrayTypeDesc__m_TemplateMT 8
+-ASMCONSTANTS_C_ASSERT(OFFSETOF__ArrayTypeDesc__m_TemplateMT
+- == offsetof(ArrayTypeDesc, m_TemplateMT));
+-
+ #define OFFSETOF__ArrayTypeDesc__m_Arg 0x10
+ ASMCONSTANTS_C_ASSERT(OFFSETOF__ArrayTypeDesc__m_Arg
+ == offsetof(ArrayTypeDesc, m_Arg));
+diff --git a/src/vm/arm/asmconstants.h b/src/vm/arm/asmconstants.h
+index 41597b2..704fa28 100644
+--- a/src/vm/arm/asmconstants.h
++++ b/src/vm/arm/asmconstants.h
+@@ -110,9 +110,6 @@ ASMCONSTANTS_C_ASSERT(SIZEOF__ArrayOfValueType == ObjSizeOf(ArrayBase));
+ #define ArrayBase__m_NumComponents 0x4
+ ASMCONSTANTS_C_ASSERT(ArrayBase__m_NumComponents == offsetof(ArrayBase, m_NumComponents));
+
+-#define ArrayTypeDesc__m_TemplateMT 0x4
+-ASMCONSTANTS_C_ASSERT(ArrayTypeDesc__m_TemplateMT == offsetof(ArrayTypeDesc, m_TemplateMT));
+-
+ #define ArrayTypeDesc__m_Arg 0x8
+ ASMCONSTANTS_C_ASSERT(ArrayTypeDesc__m_Arg == offsetof(ArrayTypeDesc, m_Arg));
+
+diff --git a/src/vm/arm/patchedcode.asm b/src/vm/arm/patchedcode.asm
+index 2ef175e..9fdd609 100644
+--- a/src/vm/arm/patchedcode.asm
++++ b/src/vm/arm/patchedcode.asm
+@@ -63,17 +63,6 @@ $label
+ TEXTAREA
+
+
+- MACRO
+- FIX_INDIRECTION $Reg, $label
+-#ifdef FEATURE_PREJIT
+- tst $Reg, #1
+- beq $label
+- ldr $Reg, [$Reg, #-1]
+-$label
+-#endif
+- MEND
+-
+-
+ ; ------------------------------------------------------------------
+ ; Start of the writeable code region
+ LEAF_ENTRY JIT_PatchedCodeStart
+@@ -336,11 +325,11 @@ AllocFailed3
+ LEAF_END
+
+
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1VC_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ ;---------------------------------------------------------------------------
+-; IN: r0: type descriptor which contains the (shared) array method table and the element type.
++; IN: r0: a (shared) array method table, which contains the element type.
+ ; IN: r1: number of array elements
+-;; OUT: r0: address of newly allocated string
++;; OUT: r0: address of newly allocated object
+
+ LEAF_ENTRY JIT_NewArr1VC_MP_InlineGetThread
+
+@@ -358,13 +347,8 @@ AllocFailed3
+ cmp r1, #MAX_FAST_ALLOCATE_ARRAY_VC_SIZE
+ bhs OverSizedArray3
+
+- ;load MethodTable from ArrayTypeDesc
+- ldr r3, [r0, #ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r3, label1
+-
+ ;get element size - stored in low 16bits of m_dwFlags
+- ldrh r12, [r3, #MethodTable__m_dwFlags]
++ ldrh r12, [r0, #MethodTable__m_dwFlags]
+
+ ; getting size of object to allocate
+
+@@ -398,11 +382,7 @@ AllocFailed3
+ str r1, [r2, #ArrayBase__m_NumComponents]
+
+ ;store methodtable
+- ldr r3, [r0, #ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r3, label2
+-
+- str r3, [r2]
++ str r0, [r2]
+
+ ;copy return value
+ mov r0, r2
+@@ -426,11 +406,11 @@ OverSizedArray3
+
+
+
+-; HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++; HCIMPL2(Object*, JIT_NewArr1OBJ_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ ;---------------------------------------------------------------------------
+-; IN: r0: type descriptor which contains the (shared) array method table and the element type.
++; IN: r0: a (shared) array method table, which contains the element type.
+ ; IN: r1: number of array elements
+-;; OUT: r0: address of newly allocated string
++;; OUT: r0: address of newly allocated object
+
+ LEAF_ENTRY JIT_NewArr1OBJ_MP_InlineGetThread
+
+@@ -466,11 +446,7 @@ OverSizedArray3
+ str r1, [r2, #ArrayBase__m_NumComponents]
+
+ ;store methodtable
+- ldr r3, [r0, #ArrayTypeDesc__m_TemplateMT - 2]
+-
+- FIX_INDIRECTION r3, label3
+-
+- str r3, [r2]
++ str r0, [r2]
+
+ ;copy return value
+ mov r0, r2
+diff --git a/src/vm/arm/stubs.cpp b/src/vm/arm/stubs.cpp
+index c832911..70cc900 100644
+--- a/src/vm/arm/stubs.cpp
++++ b/src/vm/arm/stubs.cpp
+@@ -2567,9 +2567,9 @@ static const LPVOID InlineGetThreadLocations[] = {
+
+ //EXTERN_C Object* JIT_TrialAllocSFastMP(CORINFO_CLASS_HANDLE typeHnd_);
+ Object* JIT_TrialAllocSFastMP(CORINFO_CLASS_HANDLE typeHnd_);
+-EXTERN_C Object* JIT_NewArr1OBJ_MP(CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1OBJ_MP(CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+ EXTERN_C Object* AllocateStringFastMP(CLR_I4 cch);
+-EXTERN_C Object* JIT_NewArr1VC_MP(CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1VC_MP(CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+ EXTERN_C Object* JIT_BoxFastMP(CORINFO_CLASS_HANDLE type, void* unboxedData);
+
+
+diff --git a/src/vm/compile.cpp b/src/vm/compile.cpp
+index 9727430..22b8203 100644
+--- a/src/vm/compile.cpp
++++ b/src/vm/compile.cpp
+@@ -1857,6 +1857,14 @@ void EncodeTypeInDictionarySignature(
+
+ return;
+ }
++ else if((CorElementTypeZapSig)typ == ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG)
++ {
++ pSigBuilder->AppendElementType((CorElementType)ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG);
++
++ IfFailThrow(ptr.GetElemType(&typ));
++
++ _ASSERTE(typ == ELEMENT_TYPE_SZARRAY || typ == ELEMENT_TYPE_ARRAY);
++ }
+
+ pSigBuilder->AppendElementType(typ);
+
+diff --git a/src/vm/gchelpers.cpp b/src/vm/gchelpers.cpp
+index 9669f98..046f06e 100644
+--- a/src/vm/gchelpers.cpp
++++ b/src/vm/gchelpers.cpp
+@@ -439,11 +439,31 @@ void ThrowOutOfMemoryDimensionsExceeded()
+ //
+ // Handles arrays of arbitrary dimensions
+ //
++// This is wrapper overload to handle TypeHandle arrayType
++//
++OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap
++ DEBUG_ARG(BOOL bDontSetAppDomain))
++{
++ CONTRACTL
++ {
++ WRAPPER_NO_CONTRACT;
++ } CONTRACTL_END;
++
++ ArrayTypeDesc* arrayDesc = arrayType.AsArray();
++ MethodTable* pArrayMT = arrayDesc->GetMethodTable();
++
++ return AllocateArrayEx(pArrayMT, pArgs, dwNumArgs, bAllocateInLargeHeap
++ DEBUG_ARG(bDontSetAppDomain));
++}
++
++//
++// Handles arrays of arbitrary dimensions
++//
+ // If dwNumArgs is set to greater than 1 for a SZARRAY this function will recursively
+ // allocate sub-arrays and fill them in.
+ //
+ // For arrays with lower bounds, pBounds is <lower bound 1>, <count 1>, <lower bound 2>, ...
+-OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap
++OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap
+ DEBUG_ARG(BOOL bDontSetAppDomain))
+ {
+ CONTRACTL {
+@@ -464,14 +484,12 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+ }
+ #endif
+
+- ArrayTypeDesc* arrayDesc = arrayType.AsArray();
+- MethodTable* pArrayMT = arrayDesc->GetMethodTable();
+- _ASSERTE(pArrayMT->CheckInstanceActivated());
++ _ASSERTE(pArrayMT->CheckInstanceActivated());
+ PREFIX_ASSUME(pArrayMT != NULL);
+- CorElementType kind = arrayType.GetInternalCorElementType();
++ CorElementType kind = pArrayMT->GetInternalCorElementType();
+ _ASSERTE(kind == ELEMENT_TYPE_ARRAY || kind == ELEMENT_TYPE_SZARRAY);
+
+- CorElementType elemType = arrayDesc->GetTypeParam().GetInternalCorElementType();
++ CorElementType elemType = pArrayMT->GetArrayElementType();
+ // Disallow the creation of void[,] (a multi-dim array of System.Void)
+ if (elemType == ELEMENT_TYPE_VOID)
+ COMPlusThrow(kArgumentException);
+@@ -481,7 +499,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+
+ // IBC Log MethodTable access
+ g_IBCLogger.LogMethodTableAccess(pArrayMT);
+- SetTypeHandleOnThreadForAlloc(arrayType);
++ SetTypeHandleOnThreadForAlloc(TypeHandle(pArrayMT));
+
+ SIZE_T componentSize = pArrayMT->GetComponentSize();
+ bool maxArrayDimensionLengthOverflow = false;
+@@ -489,7 +507,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+
+ if (kind == ELEMENT_TYPE_ARRAY)
+ {
+- unsigned rank = arrayDesc->GetRank();
++ unsigned rank = pArrayMT->GetRank();
+ _ASSERTE(dwNumArgs == rank || dwNumArgs == 2*rank);
+
+ // Morph a ARRAY rank 1 with 0 lower bound into an SZARRAY
+@@ -498,7 +516,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+
+ // This recursive call doesn't go any farther, because the dwNumArgs will be 1,
+ // so don't bother with stack probe.
+- TypeHandle szArrayType = ClassLoader::LoadArrayTypeThrowing(arrayDesc->GetArrayElementTypeHandle(), ELEMENT_TYPE_SZARRAY, 1);
++ TypeHandle szArrayType = ClassLoader::LoadArrayTypeThrowing(pArrayMT->GetApproxArrayElementTypeHandle(), ELEMENT_TYPE_SZARRAY, 1);
+ return AllocateArrayEx(szArrayType, &pArgs[dwNumArgs - 1], 1, bAllocateInLargeHeap DEBUG_ARG(bDontSetAppDomain));
+ }
+
+@@ -561,12 +579,12 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+ if (bAllocateInLargeHeap)
+ {
+ orArray = (ArrayBase *) AllocLHeap(totalSize, FALSE, pArrayMT->ContainsPointers());
+- orArray->SetMethodTableForLargeObject(pArrayMT);
++ orArray->SetArrayMethodTableForLargeObject(pArrayMT);
+ }
+ else
+ {
+ #ifdef FEATURE_64BIT_ALIGNMENT
+- MethodTable *pElementMT = arrayDesc->GetTypeParam().GetMethodTable();
++ MethodTable *pElementMT = pArrayMT->GetApproxArrayElementTypeHandle().GetMethodTable();
+ if (pElementMT->RequiresAlign8() && pElementMT->IsValueType())
+ {
+ // This platform requires that certain fields are 8-byte aligned (and the runtime doesn't provide
+@@ -582,7 +600,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+ {
+ orArray = (ArrayBase *) Alloc(totalSize, FALSE, pArrayMT->ContainsPointers());
+ }
+- orArray->SetMethodTable(pArrayMT);
++ orArray->SetArrayMethodTable(pArrayMT);
+ }
+
+ // Initialize Object
+@@ -655,7 +673,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+ GCStressPolicy::InhibitHolder iholder;
+
+ // Allocate dwProvidedBounds arrays
+- if (!arrayDesc->GetArrayElementTypeHandle().IsArray())
++ if (!pArrayMT->GetApproxArrayElementTypeHandle().IsArray())
+ {
+ orArray = NULL;
+ }
+@@ -666,7 +684,7 @@ OBJECTREF AllocateArrayEx(TypeHandle arrayType, INT32 *pArgs, DWORD dwNumArgs, B
+ _ASSERTE(GetThread());
+ INTERIOR_STACK_PROBE(GetThread());
+
+- TypeHandle subArrayType = arrayDesc->GetArrayElementTypeHandle();
++ TypeHandle subArrayType = pArrayMT->GetApproxArrayElementTypeHandle();
+ for (UINT32 i = 0; i < cElements; i++)
+ {
+ OBJECTREF obj = AllocateArrayEx(subArrayType, &pArgs[1], dwNumArgs-1, bAllocateInLargeHeap DEBUG_ARG(bDontSetAppDomain));
+@@ -809,7 +827,7 @@ OBJECTREF FastAllocatePrimitiveArray(MethodTable* pMT, DWORD cElements, BOOL b
+ }
+
+ // Initialize Object
+- orObject->SetMethodTable( pMT );
++ orObject->SetArrayMethodTable( pMT );
+ _ASSERTE(orObject->GetMethodTable() != NULL);
+ orObject->m_NumComponents = cElements;
+
+@@ -931,7 +949,7 @@ OBJECTREF AllocateObjectArray(DWORD cElements, TypeHandle ElementType)
+ Thread::DisableSOCheckInHCALL disableSOCheckInHCALL;
+ #endif // FEATURE_STACK_PROBE
+ #endif // _DEBUG
+- return OBJECTREF( HCCALL2(fastObjectArrayAllocator, ArrayType.AsPtr(), cElements));
++ return OBJECTREF( HCCALL2(fastObjectArrayAllocator, ArrayType.AsArray()->GetTemplateMethodTable(), cElements));
+ }
+
+ STRINGREF AllocateString( DWORD cchStringLength )
+diff --git a/src/vm/gchelpers.h b/src/vm/gchelpers.h
+index 449524a..73933f6 100644
+--- a/src/vm/gchelpers.h
++++ b/src/vm/gchelpers.h
+@@ -22,6 +22,8 @@
+
+ OBJECTREF AllocateValueSzArray(TypeHandle elementType, INT32 length);
+ // The main Array allocation routine, can do multi-dimensional
++OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap = FALSE
++ DEBUG_ARG(BOOL bDontSetAppDomain = FALSE));
+ OBJECTREF AllocateArrayEx(TypeHandle arrayClass, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap = FALSE
+ DEBUG_ARG(BOOL bDontSetAppDomain = FALSE));
+ // Optimized verion of above
+@@ -47,10 +49,8 @@ OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements);
+ OBJECTREF AllocatePrimitiveArray(CorElementType type, DWORD cElements, BOOL bAllocateInLargeHeap);
+
+
+-// Allocate SD array of object pointers. StubLinker-generated asm code might
+-// implement this, so the element TypeHandle is passed as a PVOID to avoid any
+-// struct calling convention weirdness.
+-typedef HCCALL2_PTR(Object*, FastObjectArrayAllocatorFuncPtr, /*TypeHandle*/PVOID ArrayType, DWORD cElements);
++// Allocate SD array of object pointers.
++typedef HCCALL2_PTR(Object*, FastObjectArrayAllocatorFuncPtr, MethodTable *pArrayMT, DWORD cElements);
+
+ extern FastObjectArrayAllocatorFuncPtr fastObjectArrayAllocator;
+
+diff --git a/src/vm/i386/jitinterfacex86.cpp b/src/vm/i386/jitinterfacex86.cpp
+index 18acbf0..cdabb52 100644
+--- a/src/vm/i386/jitinterfacex86.cpp
++++ b/src/vm/i386/jitinterfacex86.cpp
+@@ -855,7 +855,7 @@ void *JIT_TrialAlloc::GenBox(Flags flags)
+ }
+
+
+-HCIMPL2_RAW(Object*, UnframedAllocateObjectArray, /*TypeHandle*/PVOID ArrayType, DWORD cElements)
++HCIMPL2_RAW(Object*, UnframedAllocateObjectArray, MethodTable *pArrayMT, DWORD cElements)
+ {
+ // This isn't _really_ an FCALL and therefore shouldn't have the
+ // SO_TOLERANT part of the FCALL_CONTRACT b/c it is not entered
+@@ -867,7 +867,7 @@ HCIMPL2_RAW(Object*, UnframedAllocateObjectArray, /*TypeHandle*/PVOID ArrayType,
+ SO_INTOLERANT;
+ } CONTRACTL_END;
+
+- return OBJECTREFToObject(AllocateArrayEx(TypeHandle::FromPtr(ArrayType),
++ return OBJECTREFToObject(AllocateArrayEx(pArrayMT,
+ (INT32 *)(&cElements),
+ 1,
+ FALSE
+@@ -902,8 +902,7 @@ void *JIT_TrialAlloc::GenAllocArray(Flags flags)
+ CodeLabel *noLock = sl.NewCodeLabel();
+ CodeLabel *noAlloc = sl.NewCodeLabel();
+
+- // We were passed a type descriptor in ECX, which contains the (shared)
+- // array method table and the element type.
++ // We were passed a (shared) method table in RCX, which contains the element type.
+
+ // If this is the allocator for use from unmanaged code, ECX contains the
+ // element type descriptor, or the CorElementType.
+@@ -920,12 +919,7 @@ void *JIT_TrialAlloc::GenAllocArray(Flags flags)
+
+ if (flags & NO_FRAME)
+ {
+- if (flags & OBJ_ARRAY)
+- {
+- // we need to load the true method table from the type desc
+- sl.X86EmitIndexRegLoad(kECX, kECX, offsetof(ArrayTypeDesc,m_TemplateMT)-2);
+- }
+- else
++ if ((flags & OBJ_ARRAY) == 0)
+ {
+ // mov ecx,[g_pPredefinedArrayTypes+ecx*4]
+ sl.Emit8(0x8b);
+@@ -937,16 +931,10 @@ void *JIT_TrialAlloc::GenAllocArray(Flags flags)
+
+ // je noLock
+ sl.X86EmitCondJump(noLock, X86CondCode::kJZ);
+-
+- // we need to load the true method table from the type desc
+- sl.X86EmitIndexRegLoad(kECX, kECX, offsetof(ArrayTypeDesc,m_TemplateMT));
+ }
+ }
+ else
+ {
+- // we need to load the true method table from the type desc
+- sl.X86EmitIndexRegLoad(kECX, kECX, offsetof(ArrayTypeDesc,m_TemplateMT)-2);
+-
+ #ifdef FEATURE_PREJIT
+ CodeLabel *indir = sl.NewCodeLabel();
+
+@@ -1064,7 +1052,7 @@ void *JIT_TrialAlloc::GenAllocArray(Flags flags)
+ // pop edx - element count
+ sl.X86EmitPopReg(kEDX);
+
+- // pop ecx - array type descriptor
++ // pop ecx - array method table
+ sl.X86EmitPopReg(kECX);
+
+ // mov dword ptr [eax]ArrayBase.m_NumComponents, edx
+@@ -1089,7 +1077,7 @@ void *JIT_TrialAlloc::GenAllocArray(Flags flags)
+ // pop edx - element count
+ sl.X86EmitPopReg(kEDX);
+
+- // pop ecx - array type descriptor
++ // pop ecx - array method table
+ sl.X86EmitPopReg(kECX);
+
+ CodeLabel * target;
+diff --git a/src/vm/interpreter.cpp b/src/vm/interpreter.cpp
+index 010fee6..4e39efb 100644
+--- a/src/vm/interpreter.cpp
++++ b/src/vm/interpreter.cpp
+@@ -6068,13 +6068,12 @@ void Interpreter::NewArr()
+ }
+ #endif
+
+- TypeHandle typeHnd(elemClsHnd);
+- ArrayTypeDesc* pArrayClassRef = typeHnd.AsArray();
++ MethodTable *pArrayMT = (MethodTable *) elemClsHnd;
+
+- pArrayClassRef->GetMethodTable()->CheckRunClassInitThrowing();
++ pArrayMT->CheckRunClassInitThrowing();
+
+ INT32 size32 = (INT32)sz;
+- Object* newarray = OBJECTREFToObject(AllocateArrayEx(typeHnd, &size32, 1));
++ Object* newarray = OBJECTREFToObject(AllocateArrayEx(pArrayMT, &size32, 1));
+
+ GCX_FORBID();
+ OpStackTypeSet(stkInd, InterpreterType(CORINFO_TYPE_CLASS));
+diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
+index aaab589..2552b01 100644
+--- a/src/vm/jithelpers.cpp
++++ b/src/vm/jithelpers.cpp
+@@ -3002,7 +3002,7 @@ HCIMPLEND
+ //*************************************************************
+ // Array allocation fast path for arrays of value type elements
+ //
+-HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ {
+ FCALL_CONTRACT;
+
+@@ -3028,16 +3028,14 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
+ // some reshuffling of intermediate values into nonvolatile registers around the call.
+ Thread *thread = GetThread();
+
+- TypeHandle arrayTypeHandle(arrayTypeHnd_);
+- ArrayTypeDesc *arrayTypeDesc = arrayTypeHandle.AsArray();
+- MethodTable *arrayMethodTable = arrayTypeDesc->GetTemplateMethodTable();
++ MethodTable *pArrayMT = (MethodTable *)arrayMT;
+
+- _ASSERTE(arrayMethodTable->HasComponentSize());
+- SIZE_T componentSize = arrayMethodTable->RawGetComponentSize();
++ _ASSERTE(pArrayMT->HasComponentSize());
++ SIZE_T componentSize = pArrayMT->RawGetComponentSize();
+ SIZE_T totalSize = componentCount * componentSize;
+ _ASSERTE(totalSize / componentSize == componentCount);
+
+- SIZE_T baseSize = arrayMethodTable->GetBaseSize();
++ SIZE_T baseSize = pArrayMT->GetBaseSize();
+ totalSize += baseSize;
+ _ASSERTE(totalSize >= baseSize);
+
+@@ -3056,7 +3054,7 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
+
+ _ASSERTE(allocPtr != nullptr);
+ ArrayBase *array = reinterpret_cast<ArrayBase *>(allocPtr);
+- array->SetMethodTable(arrayMethodTable);
++ array->SetArrayMethodTable(pArrayMT);
+ _ASSERTE(static_cast<DWORD>(componentCount) == componentCount);
+ array->m_NumComponents = static_cast<DWORD>(componentCount);
+
+@@ -3072,14 +3070,14 @@ HCIMPL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHn
+
+ // Tail call to the slow helper
+ ENDFORBIDGC();
+- return HCCALL2(JIT_NewArr1, arrayTypeHnd_, size);
++ return HCCALL2(JIT_NewArr1, arrayMT, size);
+ }
+ HCIMPLEND
+
+ //*************************************************************
+ // Array allocation fast path for arrays of object elements
+ //
+-HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ {
+ FCALL_CONTRACT;
+
+@@ -3100,14 +3098,12 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
+ // some reshuffling of intermediate values into nonvolatile registers around the call.
+ Thread *thread = GetThread();
+
+- TypeHandle arrayTypeHandle(arrayTypeHnd_);
+- ArrayTypeDesc *arrayTypeDesc = arrayTypeHandle.AsArray();
+- MethodTable *arrayMethodTable = arrayTypeDesc->GetTemplateMethodTable();
+-
+ SIZE_T totalSize = componentCount * sizeof(void *);
+ _ASSERTE(totalSize / sizeof(void *) == componentCount);
+
+- SIZE_T baseSize = arrayMethodTable->GetBaseSize();
++ MethodTable *pArrayMT = (MethodTable *)arrayMT;
++
++ SIZE_T baseSize = pArrayMT->GetBaseSize();
+ totalSize += baseSize;
+ _ASSERTE(totalSize >= baseSize);
+
+@@ -3124,7 +3120,7 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
+
+ _ASSERTE(allocPtr != nullptr);
+ ArrayBase *array = reinterpret_cast<ArrayBase *>(allocPtr);
+- array->SetMethodTable(arrayMethodTable);
++ array->SetArrayMethodTable(pArrayMT);
+ _ASSERTE(static_cast<DWORD>(componentCount) == componentCount);
+ array->m_NumComponents = static_cast<DWORD>(componentCount);
+
+@@ -3140,14 +3136,14 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayTypeH
+
+ // Tail call to the slow helper
+ ENDFORBIDGC();
+- return HCCALL2(JIT_NewArr1, arrayTypeHnd_, size);
++ return HCCALL2(JIT_NewArr1, arrayMT, size);
+ }
+ HCIMPLEND
+
+ #include <optdefault.h>
+
+ /*************************************************************/
+-HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ {
+ FCALL_CONTRACT;
+
+@@ -3155,11 +3151,11 @@ HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0(); // Set up a frame
+
+- TypeHandle typeHnd(arrayTypeHnd_);
++ MethodTable *pArrayMT = (MethodTable *)arrayMT;
+
+- _ASSERTE(typeHnd.GetInternalCorElementType() == ELEMENT_TYPE_SZARRAY);
+- typeHnd.CheckRestore();
+- ArrayTypeDesc* pArrayClassRef = typeHnd.AsArray();
++ _ASSERTE(pArrayMT->IsFullyLoaded());
++ _ASSERTE(pArrayMT->IsArray());
++ _ASSERTE(!pArrayMT->IsMultiDimArray());
+
+ if (size < 0)
+ COMPlusThrow(kOverflowException);
+@@ -3176,7 +3172,7 @@ HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+ // is this a primitive type?
+ //
+
+- CorElementType elemType = pArrayClassRef->GetArrayElementTypeHandle().GetSignatureCorElementType();
++ CorElementType elemType = pArrayMT->GetInternalCorElementType();
+
+ if (CorTypeInfo::IsPrimitiveType(elemType)
+ #ifdef FEATURE_64BIT_ALIGNMENT
+@@ -3209,9 +3205,13 @@ HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+ #endif
+
+ if (g_pPredefinedArrayTypes[elemType] == NULL)
+- g_pPredefinedArrayTypes[elemType] = pArrayClassRef;
++ {
++ TypeHandle elemTypeHnd = TypeHandle(MscorlibBinder::GetElementType(elemType));
++
++ g_pPredefinedArrayTypes[elemType] = ClassLoader::LoadArrayTypeThrowing(elemTypeHnd, ELEMENT_TYPE_SZARRAY, 0).AsArray();
++ }
+
+- newArray = FastAllocatePrimitiveArray(pArrayClassRef->GetMethodTable(), static_cast<DWORD>(size), bAllocateInLargeHeap);
++ newArray = FastAllocatePrimitiveArray(pArrayMT, static_cast<DWORD>(size), bAllocateInLargeHeap);
+ }
+ else
+ {
+@@ -3221,7 +3221,7 @@ HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
+ }
+ #endif // _DEBUG
+ INT32 size32 = (INT32)size;
+- newArray = AllocateArrayEx(typeHnd, &size32, 1);
++ newArray = AllocateArrayEx(pArrayMT, &size32, 1);
+ }
+
+ HELPER_METHOD_FRAME_END();
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index 84f635a..f756c90 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -2726,7 +2726,14 @@ void CEEInfo::embedGenericHandle(
+
+ pResult->handleType = CORINFO_HANDLETYPE_CLASS;
+
+- pResult->compileTimeHandle = (CORINFO_GENERIC_HANDLE)th.AsPtr();
++ if (pResolvedToken->tokenType == CORINFO_TOKENKIND_Newarr)
++ {
++ pResult->compileTimeHandle = (CORINFO_GENERIC_HANDLE)th.AsArray()->GetTemplateMethodTable();
++ }
++ else
++ {
++ pResult->compileTimeHandle = (CORINFO_GENERIC_HANDLE)th.AsPtr();
++ }
+
+ if (fEmbedParent && pResolvedToken->hMethod != NULL)
+ {
+@@ -3404,7 +3411,10 @@ NoSpecialCase:
+ case TypeHandleSlot:
+ {
+ if (pResolvedToken->tokenType == CORINFO_TOKENKIND_Newarr)
++ {
++ sigBuilder.AppendElementType((CorElementType)ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG);
+ sigBuilder.AppendElementType(ELEMENT_TYPE_SZARRAY);
++ }
+
+ // Note that we can come here with pResolvedToken->pTypeSpec == NULL for invalid IL that
+ // directly references __Canon
+diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
+index d287248..a3017ef 100644
+--- a/src/vm/jitinterface.h
++++ b/src/vm/jitinterface.h
+@@ -215,9 +215,9 @@ extern FCDECL1(StringObject*, AllocateString_MP_FastPortable, DWORD stringLength
+ extern FCDECL1(StringObject*, UnframedAllocateString, DWORD stringLength);
+ extern FCDECL1(StringObject*, FramedAllocateString, DWORD stringLength);
+
+-extern FCDECL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
+-extern FCDECL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
+-extern FCDECL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
++extern FCDECL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++extern FCDECL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++extern FCDECL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+
+ #ifndef JIT_Stelem_Ref
+ #define JIT_Stelem_Ref JIT_Stelem_Ref_Portable
+@@ -326,8 +326,8 @@ private:
+ #ifdef _WIN64
+ EXTERN_C FCDECL1(Object*, JIT_TrialAllocSFastMP_InlineGetThread, CORINFO_CLASS_HANDLE typeHnd_);
+ EXTERN_C FCDECL2(Object*, JIT_BoxFastMP_InlineGetThread, CORINFO_CLASS_HANDLE type, void* data);
+-EXTERN_C FCDECL2(Object*, JIT_NewArr1VC_MP_InlineGetThread, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
+-EXTERN_C FCDECL2(Object*, JIT_NewArr1OBJ_MP_InlineGetThread, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
++EXTERN_C FCDECL2(Object*, JIT_NewArr1VC_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++EXTERN_C FCDECL2(Object*, JIT_NewArr1OBJ_MP_InlineGetThread, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+
+ #endif // _WIN64
+
+diff --git a/src/vm/jitinterfacegen.cpp b/src/vm/jitinterfacegen.cpp
+index 8d1c8cd..2638740 100644
+--- a/src/vm/jitinterfacegen.cpp
++++ b/src/vm/jitinterfacegen.cpp
+@@ -30,8 +30,8 @@
+ EXTERN_C Object* JIT_TrialAllocSFastMP_InlineGetThread(CORINFO_CLASS_HANDLE typeHnd_);
+ EXTERN_C Object* JIT_BoxFastMP_InlineGetThread (CORINFO_CLASS_HANDLE type, void* unboxedData);
+ EXTERN_C Object* AllocateStringFastMP_InlineGetThread (CLR_I4 cch);
+-EXTERN_C Object* JIT_NewArr1OBJ_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+-EXTERN_C Object* JIT_NewArr1VC_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1OBJ_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1VC_MP_InlineGetThread (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+
+ // This next set is the fast version that invoke GetThread but is still faster than the VM implementation (i.e.
+ // the "slow" versions).
+@@ -42,10 +42,10 @@ EXTERN_C Object* JIT_BoxFastUP (CORINFO_CLASS_HANDLE type, void* unboxedData);
+ EXTERN_C Object* AllocateStringFastMP (CLR_I4 cch);
+ EXTERN_C Object* AllocateStringFastUP (CLR_I4 cch);
+
+-EXTERN_C Object* JIT_NewArr1OBJ_MP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+-EXTERN_C Object* JIT_NewArr1OBJ_UP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+-EXTERN_C Object* JIT_NewArr1VC_MP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+-EXTERN_C Object* JIT_NewArr1VC_UP (CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1OBJ_MP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1OBJ_UP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1VC_MP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++EXTERN_C Object* JIT_NewArr1VC_UP (CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+
+ //For the optimized JIT_Mon helpers
+ #if defined(_TARGET_AMD64_)
+diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
+index 21fab72..598759a 100644
+--- a/src/vm/methodtable.cpp
++++ b/src/vm/methodtable.cpp
+@@ -5903,6 +5903,19 @@ void MethodTable::DoFullyLoad(Generics::RecursionGraph * const pVisited, const
+
+ }
+
++ if (level >= CLASS_DEPENDENCIES_LOADED && IsArray())
++ {
++ // The array type should be loaded, if template method table is loaded
++ // See also: ArrayBase::SetArrayMethodTable, ArrayBase::SetArrayMethodTableForLargeObject
++ TypeHandle th = ClassLoader::LoadArrayTypeThrowing(GetApproxArrayElementTypeHandle(),
++ GetInternalCorElementType(),
++ GetRank(),
++ ClassLoader::LoadTypes,
++ level);
++ _ASSERTE(th.IsTypeDesc() && th.IsArray());
++ _ASSERTE(!(level == CLASS_LOADED && !th.IsFullyLoaded()));
++ }
++
+ END_SO_INTOLERANT_CODE;
+
+ #endif //!DACCESS_COMPILE
+diff --git a/src/vm/object.cpp b/src/vm/object.cpp
+index 4c08f02..daa20e7 100644
+--- a/src/vm/object.cpp
++++ b/src/vm/object.cpp
+@@ -1818,6 +1818,24 @@ VOID Object::ValidateInner(BOOL bDeep, BOOL bVerifyNextHeader, BOOL bVerifySyncB
+
+ #endif // VERIFY_HEAP
+
++#ifndef DACCESS_COMPILE
++#ifdef _DEBUG
++void ArrayBase::AssertArrayTypeDescLoaded()
++{
++ _ASSERTE (m_pMethTab->IsArray());
++
++ // The type should already be loaded
++ // See also: MethodTable::DoFullyLoad
++ TypeHandle th = ClassLoader::LoadArrayTypeThrowing(m_pMethTab->GetApproxArrayElementTypeHandle(),
++ m_pMethTab->GetInternalCorElementType(),
++ m_pMethTab->GetRank(),
++ ClassLoader::DontLoadTypes);
++
++ _ASSERTE(!th.IsNull());
++}
++#endif // DEBUG
++#endif // !DACCESS_COMPILE
++
+ /*==================================NewString===================================
+ **Action: Creates a System.String object.
+ **Returns:
+diff --git a/src/vm/object.h b/src/vm/object.h
+index cb3743c..00def1d 100644
+--- a/src/vm/object.h
++++ b/src/vm/object.h
+@@ -207,19 +207,34 @@ class Object
+ m_pMethTab = pMT;
+ }
+
+- VOID SetMethodTable(MethodTable *pMT)
++ VOID SetMethodTable(MethodTable *pMT
++ DEBUG_ARG(BOOL bAllowArray = FALSE))
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pMethTab = pMT;
++
++#ifdef _DEBUG
++ if (!bAllowArray)
++ {
++ AssertNotArray();
++ }
++#endif // _DEBUG
+ }
+
+- VOID SetMethodTableForLargeObject(MethodTable *pMT)
++ VOID SetMethodTableForLargeObject(MethodTable *pMT
++ DEBUG_ARG(BOOL bAllowArray = FALSE))
+ {
+ // This function must be used if the allocation occurs on the large object heap, and the method table might be a collectible type
+ WRAPPER_NO_CONTRACT;
+ ErectWriteBarrierForMT(&m_pMethTab, pMT);
++
++#ifdef _DEBUG
++ if (!bAllowArray)
++ {
++ AssertNotArray();
++ }
++#endif // _DEBUG
+ }
+-
+ #endif //!DACCESS_COMPILE
+
+ // An object might be a proxy of some sort, with a thunking VTable. If so, we can
+@@ -664,6 +679,15 @@ class Object
+ BOOL ShouldCheckAppDomainAgile(BOOL raiseAssert, BOOL *pfResult);
+ #endif
+
++#ifdef _DEBUG
++ void AssertNotArray()
++ {
++ if (m_pMethTab->IsArray())
++ {
++ _ASSERTE(!"ArrayBase::SetArrayMethodTable/ArrayBase::SetArrayMethodTableForLargeObject should be used for arrays");
++ }
++ }
++#endif // _DEBUG
+ };
+
+ /*
+@@ -745,10 +769,10 @@ class ArrayBase : public Object
+ friend class GCHeap;
+ friend class CObjectHeader;
+ friend class Object;
+- friend OBJECTREF AllocateArrayEx(TypeHandle arrayClass, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap DEBUG_ARG(BOOL bDontSetAppDomain));
++ friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap DEBUG_ARG(BOOL bDontSetAppDomain));
+ friend OBJECTREF FastAllocatePrimitiveArray(MethodTable* arrayType, DWORD cElements, BOOL bAllocateInLargeHeap);
+- friend FCDECL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
+- friend FCDECL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE typeHnd_, INT_PTR size);
++ friend FCDECL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++ friend FCDECL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+ friend class JIT_TrialAlloc;
+ friend class CheckAsmOffsets;
+ friend struct _DacGlobals;
+@@ -790,6 +814,11 @@ public:
+ // Total element count for the array
+ inline DWORD GetNumComponents() const;
+
++#ifndef DACCESS_COMPILE
++ inline void SetArrayMethodTable(MethodTable *pArrayMT);
++ inline void SetArrayMethodTableForLargeObject(MethodTable *pArrayMT);
++#endif // !DACCESS_COMPILE
++
+ // Get pointer to elements, handles any number of dimensions
+ PTR_BYTE GetDataPtr(BOOL inGC = FALSE) const {
+ LIMITED_METHOD_CONTRACT;
+@@ -865,6 +894,13 @@ public:
+
+ inline static unsigned GetBoundsOffset(MethodTable* pMT);
+ inline static unsigned GetLowerBoundsOffset(MethodTable* pMT);
++
++private:
++#ifndef DACCESS_COMPILE
++#ifdef _DEBUG
++ void AssertArrayTypeDescLoaded();
++#endif // _DEBUG
++#endif // !DACCESS_COMPILE
+ };
+
+ //
+@@ -905,7 +941,7 @@ class PtrArray : public ArrayBase
+ {
+ friend class GCHeap;
+ friend class ClrDataAccess;
+- friend OBJECTREF AllocateArrayEx(TypeHandle arrayClass, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap);
++ friend OBJECTREF AllocateArrayEx(MethodTable *pArrayMT, INT32 *pArgs, DWORD dwNumArgs, BOOL bAllocateInLargeHeap);
+ friend class JIT_TrialAlloc;
+ friend class CheckAsmOffsets;
+
+diff --git a/src/vm/object.inl b/src/vm/object.inl
+index 5698321..5dc3d6d 100644
+--- a/src/vm/object.inl
++++ b/src/vm/object.inl
+@@ -146,6 +146,7 @@ inline /* static */ TypeHandle ArrayBase::GetTypeHandle(MethodTable * pMT)
+ // for T[] is available and restored
+
+ // @todo This should be turned into a probe with a hard SO when we have one
++ // See also: ArrayBase::SetArrayMethodTable, ArrayBase::SetArrayMethodTableForLargeObject and MethodTable::DoFullyLoad
+ CONTRACT_VIOLATION(SOToleranceViolation);
+ // == FailIfNotLoadedOrNotRestored
+ TypeHandle arrayType = ClassLoader::LoadArrayTypeThrowing(pMT->GetApproxArrayElementTypeHandle(), kind, rank, ClassLoader::DontLoadTypes);
+@@ -174,6 +175,32 @@ inline DWORD ArrayBase::GetNumComponents() const
+ return m_NumComponents;
+ }
+
++#ifndef DACCESS_COMPILE
++inline void ArrayBase::SetArrayMethodTable(MethodTable *pArrayMT)
++{
++ LIMITED_METHOD_CONTRACT;
++
++ SetMethodTable(pArrayMT
++ DEBUG_ARG(TRUE));
++
++#ifdef _DEBUG
++ AssertArrayTypeDescLoaded();
++#endif // _DEBUG
++}
++
++inline void ArrayBase::SetArrayMethodTableForLargeObject(MethodTable *pArrayMT)
++{
++ LIMITED_METHOD_CONTRACT;
++
++ SetMethodTableForLargeObject(pArrayMT
++ DEBUG_ARG(TRUE));
++
++#ifdef _DEBUG
++ AssertArrayTypeDescLoaded();
++#endif // _DEBUG
++}
++#endif // !DACCESS_COMPILE
++
+ inline /* static */ unsigned ArrayBase::GetDataPtrOffset(MethodTable* pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
+index af3f190..746e415 100644
+--- a/src/vm/prestub.cpp
++++ b/src/vm/prestub.cpp
+@@ -2764,7 +2764,9 @@ PCODE DynamicHelperFixup(TransitionBlock * pTransitionBlock, TADDR * pCell, DWOR
+ case ENCODE_NEW_ARRAY_HELPER:
+ {
+ CorInfoHelpFunc helpFunc = CEEInfo::getNewArrHelperStatic(th);
+- pHelper = DynamicHelpers::CreateHelperArgMove(pModule->GetLoaderAllocator(), th.AsTAddr(), CEEJitInfo::getHelperFtnStatic(helpFunc));
++ ArrayTypeDesc *pArrayTypeDesc = th.AsArray();
++ MethodTable *pArrayMT = pArrayTypeDesc->GetTemplateMethodTable();
++ pHelper = DynamicHelpers::CreateHelperArgMove(pModule->GetLoaderAllocator(), dac_cast<TADDR>(pArrayMT), CEEJitInfo::getHelperFtnStatic(helpFunc));
+ }
+ break;
+
+--
+2.7.4
+
diff --git a/packaging/0013-Implement-JIT_NewArr1_R2R-as-R2R-wrapper-for-JIT_New.patch b/packaging/0013-Implement-JIT_NewArr1_R2R-as-R2R-wrapper-for-JIT_New.patch
new file mode 100644
index 0000000000..957e1d8759
--- /dev/null
+++ b/packaging/0013-Implement-JIT_NewArr1_R2R-as-R2R-wrapper-for-JIT_New.patch
@@ -0,0 +1,384 @@
+From 6cda73133fcb8785d0c2ab6cb296f956286f4034 Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Tue, 27 Jun 2017 21:31:03 +0300
+Subject: [PATCH 13/32] Implement JIT_NewArr1_R2R as R2R wrapper for
+ JIT_NewArr1 to support both MethodTable-based and TypeDesc-based helpers.
+ (#12475)
+
+Related issue: #12463
+
+FIX: fix No.3, rebased
+---
+ src/inc/corinfo.h | 11 +--
+ src/inc/jithelpers.h | 1 +
+ src/inc/readytorunhelpers.h | 2 +-
+ src/jit/earlyprop.cpp | 2 +
+ src/jit/importer.cpp | 5 +-
+ src/jit/utils.cpp | 1 +
+ src/jit/valuenum.cpp | 1 +
+ src/vm/jithelpers.cpp | 15 ++++
+ src/vm/jitinterface.cpp | 6 +-
+ src/vm/jitinterface.h | 1 +
+ src/zap/zapinfo.cpp | 2 +-
+ tests/src/readytorun/tests/newarray.cs | 126 +++++++++++++++++++++++++++++
+ tests/src/readytorun/tests/newarray.csproj | 32 ++++++++
+ 13 files changed, 195 insertions(+), 10 deletions(-)
+ create mode 100644 tests/src/readytorun/tests/newarray.cs
+ create mode 100644 tests/src/readytorun/tests/newarray.csproj
+
+diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
+index f6a136c..a6acd71 100644
+--- a/src/inc/corinfo.h
++++ b/src/inc/corinfo.h
+@@ -213,11 +213,11 @@ TODO: Talk about initializing strutures before use
+ #define SELECTANY extern __declspec(selectany)
+ #endif
+
+-SELECTANY const GUID JITEEVersionIdentifier = { /* f00b3f49-ddd2-49be-ba43-6e49ffa66959 */
+- 0xf00b3f49,
+- 0xddd2,
+- 0x49be,
+- { 0xba, 0x43, 0x6e, 0x49, 0xff, 0xa6, 0x69, 0x59 }
++SELECTANY const GUID JITEEVersionIdentifier = { /* 28eb875f-b6a9-4a04-9ba7-69ba59deed46 */
++ 0x28eb875f,
++ 0xb6a9,
++ 0x4a04,
++ { 0x9b, 0xa7, 0x69, 0xba, 0x59, 0xde, 0xed, 0x46 }
+ };
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////
+@@ -400,6 +400,7 @@ enum CorInfoHelpFunc
+ CORINFO_HELP_NEW_MDARR, // multi-dim array helper (with or without lower bounds - dimensions passed in as vararg)
+ CORINFO_HELP_NEW_MDARR_NONVARARG,// multi-dim array helper (with or without lower bounds - dimensions passed in as unmanaged array)
+ CORINFO_HELP_NEWARR_1_DIRECT, // helper for any one dimensional array creation
++ CORINFO_HELP_NEWARR_1_R2R_DIRECT, // wrapper for R2R direct call, which extracts method table from ArrayTypeDesc
+ CORINFO_HELP_NEWARR_1_OBJ, // optimized 1-D object arrays
+ CORINFO_HELP_NEWARR_1_VC, // optimized 1-D value class arrays
+ CORINFO_HELP_NEWARR_1_ALIGN8, // like VC, but aligns the array start
+diff --git a/src/inc/jithelpers.h b/src/inc/jithelpers.h
+index 4e56250..b45948a 100644
+--- a/src/inc/jithelpers.h
++++ b/src/inc/jithelpers.h
+@@ -77,6 +77,7 @@
+ JITHELPER(CORINFO_HELP_NEW_MDARR, JIT_NewMDArr,CORINFO_HELP_SIG_8_VA)
+ JITHELPER(CORINFO_HELP_NEW_MDARR_NONVARARG, JIT_NewMDArrNonVarArg,CORINFO_HELP_SIG_4_STACK)
+ JITHELPER(CORINFO_HELP_NEWARR_1_DIRECT, JIT_NewArr1,CORINFO_HELP_SIG_REG_ONLY)
++ JITHELPER(CORINFO_HELP_NEWARR_1_R2R_DIRECT, JIT_NewArr1_R2R,CORINFO_HELP_SIG_REG_ONLY)
+ DYNAMICJITHELPER(CORINFO_HELP_NEWARR_1_OBJ, JIT_NewArr1,CORINFO_HELP_SIG_REG_ONLY)
+ DYNAMICJITHELPER(CORINFO_HELP_NEWARR_1_VC, JIT_NewArr1,CORINFO_HELP_SIG_REG_ONLY)
+ DYNAMICJITHELPER(CORINFO_HELP_NEWARR_1_ALIGN8, JIT_NewArr1,CORINFO_HELP_SIG_REG_ONLY)
+diff --git a/src/inc/readytorunhelpers.h b/src/inc/readytorunhelpers.h
+index 9baf0e4..7a1245c 100644
+--- a/src/inc/readytorunhelpers.h
++++ b/src/inc/readytorunhelpers.h
+@@ -46,7 +46,7 @@ HELPER(READYTORUN_HELPER_NewMultiDimArr, CORINFO_HELP_NEW_MDARR,
+ HELPER(READYTORUN_HELPER_NewMultiDimArr_NonVarArg, CORINFO_HELP_NEW_MDARR_NONVARARG, )
+
+ HELPER(READYTORUN_HELPER_NewObject, CORINFO_HELP_NEWFAST, )
+-HELPER(READYTORUN_HELPER_NewArray, CORINFO_HELP_NEWARR_1_DIRECT, )
++HELPER(READYTORUN_HELPER_NewArray, CORINFO_HELP_NEWARR_1_R2R_DIRECT, )
+ HELPER(READYTORUN_HELPER_CheckCastAny, CORINFO_HELP_CHKCASTANY, )
+ HELPER(READYTORUN_HELPER_CheckInstanceAny, CORINFO_HELP_ISINSTANCEOFANY, )
+
+diff --git a/src/jit/earlyprop.cpp b/src/jit/earlyprop.cpp
+index 51de631..ec460c6 100644
+--- a/src/jit/earlyprop.cpp
++++ b/src/jit/earlyprop.cpp
+@@ -79,6 +79,7 @@ GenTreePtr Compiler::getArrayLengthFromAllocation(GenTreePtr tree)
+ if (call->gtCallType == CT_HELPER)
+ {
+ if (call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) ||
++ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_VC) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8))
+@@ -116,6 +117,7 @@ GenTreePtr Compiler::getObjectHandleNodeFromAllocation(GenTreePtr tree)
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWSFAST) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWSFAST_ALIGN8) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) ||
++ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_VC) ||
+ call->gtCallMethHnd == eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8))
+diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
+index 80c0b75..c5f2970 100644
+--- a/src/jit/importer.cpp
++++ b/src/jit/importer.cpp
+@@ -3004,9 +3004,10 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
+ if (newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_DIRECT) &&
+ newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_OBJ) &&
+ newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_VC) &&
+- newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8)
++ newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_ALIGN8) &&
+ #ifdef FEATURE_READYTORUN_COMPILER
+- && newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
++ newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_NEWARR_1_R2R_DIRECT) &&
++ newArrayCall->gtCall.gtCallMethHnd != eeFindHelper(CORINFO_HELP_READYTORUN_NEWARR_1)
+ #endif
+ )
+ {
+diff --git a/src/jit/utils.cpp b/src/jit/utils.cpp
+index 9fbe394..30ea30a 100644
+--- a/src/jit/utils.cpp
++++ b/src/jit/utils.cpp
+@@ -1315,6 +1315,7 @@ void HelperCallProperties::init()
+ case CORINFO_HELP_NEW_MDARR:
+ case CORINFO_HELP_NEWARR_1_DIRECT:
+ case CORINFO_HELP_NEWARR_1_OBJ:
++ case CORINFO_HELP_NEWARR_1_R2R_DIRECT:
+ case CORINFO_HELP_READYTORUN_NEWARR_1:
+
+ mayFinalize = true; // These may run a finalizer
+diff --git a/src/jit/valuenum.cpp b/src/jit/valuenum.cpp
+index 5b40122..e4836d6 100644
+--- a/src/jit/valuenum.cpp
++++ b/src/jit/valuenum.cpp
+@@ -7610,6 +7610,7 @@ VNFunc Compiler::fgValueNumberHelperMethVNFunc(CorInfoHelpFunc helpFunc)
+ vnf = VNF_JitNewArr;
+ break;
+
++ case CORINFO_HELP_NEWARR_1_R2R_DIRECT:
+ case CORINFO_HELP_READYTORUN_NEWARR_1:
+ vnf = VNF_JitReadyToRunNewArr;
+ break;
+diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
+index 2552b01..0b51339 100644
+--- a/src/vm/jithelpers.cpp
++++ b/src/vm/jithelpers.cpp
+@@ -3140,6 +3140,21 @@ HCIMPL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, I
+ }
+ HCIMPLEND
+
++//*************************************************************
++// R2R-specific array allocation wrapper that extracts array method table from ArrayTypeDesc
++//
++HCIMPL2(Object*, JIT_NewArr1_R2R, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size)
++{
++ FCALL_CONTRACT;
++
++ TypeHandle arrayTypeHandle(arrayTypeHnd_);
++ ArrayTypeDesc *pArrayTypeDesc = arrayTypeHandle.AsArray();
++ MethodTable *pArrayMT = pArrayTypeDesc->GetTemplateMethodTable();
++
++ return HCCALL2(JIT_NewArr1, (CORINFO_CLASS_HANDLE)pArrayMT, size);
++}
++HCIMPLEND
++
+ #include <optdefault.h>
+
+ /*************************************************************/
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index f756c90..52db7c5 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -3412,7 +3412,11 @@ NoSpecialCase:
+ {
+ if (pResolvedToken->tokenType == CORINFO_TOKENKIND_Newarr)
+ {
+- sigBuilder.AppendElementType((CorElementType)ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG);
++ if (!IsReadyToRunCompilation())
++ {
++ sigBuilder.AppendElementType((CorElementType)ELEMENT_TYPE_NATIVE_ARRAY_TEMPLATE_ZAPSIG);
++ }
++
+ sigBuilder.AppendElementType(ELEMENT_TYPE_SZARRAY);
+ }
+
+diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
+index a3017ef..d67cfc5 100644
+--- a/src/vm/jitinterface.h
++++ b/src/vm/jitinterface.h
+@@ -217,6 +217,7 @@ extern FCDECL1(StringObject*, FramedAllocateString, DWORD stringLength);
+
+ extern FCDECL2(Object*, JIT_NewArr1VC_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+ extern FCDECL2(Object*, JIT_NewArr1OBJ_MP_FastPortable, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
++extern FCDECL2(Object*, JIT_NewArr1_R2R, CORINFO_CLASS_HANDLE arrayTypeHnd_, INT_PTR size);
+ extern FCDECL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size);
+
+ #ifndef JIT_Stelem_Ref
+diff --git a/src/zap/zapinfo.cpp b/src/zap/zapinfo.cpp
+index 60e03af..e94dea6 100644
+--- a/src/zap/zapinfo.cpp
++++ b/src/zap/zapinfo.cpp
+@@ -3384,7 +3384,7 @@ CorInfoHelpFunc ZapInfo::getCastingHelper(CORINFO_RESOLVED_TOKEN * pResolvedToke
+ CorInfoHelpFunc ZapInfo::getNewArrHelper(CORINFO_CLASS_HANDLE arrayCls)
+ {
+ if (IsReadyToRunCompilation())
+- return CORINFO_HELP_NEWARR_1_DIRECT;
++ return CORINFO_HELP_NEWARR_1_R2R_DIRECT;
+
+ return m_pEEJitInfo->getNewArrHelper(arrayCls);
+ }
+diff --git a/tests/src/readytorun/tests/newarray.cs b/tests/src/readytorun/tests/newarray.cs
+new file mode 100644
+index 0000000..66917ab
+--- /dev/null
++++ b/tests/src/readytorun/tests/newarray.cs
+@@ -0,0 +1,126 @@
++// Licensed to the .NET Foundation under one or more agreements.
++// The .NET Foundation licenses this file to you under the MIT license.
++// See the LICENSE file in the project root for more information.
++//
++
++using System;
++using System.Collections.Generic;
++using System.Globalization;
++using System.Runtime.CompilerServices;
++using System.Text;
++using System.Threading;
++
++class Program
++{
++ const int ARRAY_SIZE = 1024;
++
++ static int Main()
++ {
++ // Run all tests 3x times to exercise both slow and fast paths work
++ for (int i = 0; i < 3; i++)
++ RunAllTests();
++
++ Console.WriteLine(Assert.HasAssertFired ? "FAILED" : "PASSED");
++ return Assert.HasAssertFired ? 1 : 100;
++ }
++
++ static void RunAllTests()
++ {
++ RunTest1();
++ RunTest2();
++ RunTest3();
++ RunTest4();
++ RunTest5();
++ RunTest6();
++ RunTest7();
++ RunTest8();
++ }
++
++ static void RunTest1()
++ {
++ int [] arr = new int[ARRAY_SIZE];
++
++ Assert.AreEqual(arr.GetType().ToString(), "System.Int32[]");
++ }
++
++ static void RunTest2()
++ {
++ object [] arr = new object[ARRAY_SIZE];
++
++ Assert.AreEqual(arr.GetType().ToString(), "System.Object[]");
++ }
++
++ static void RunTest3()
++ {
++ int [] arr = new_array_generic<int>();
++
++ Assert.AreEqual(arr.GetType().ToString(), "System.Int32[]");
++ }
++
++ static void RunTest4()
++ {
++ string [] arr = new_array_generic<string>();
++
++ Assert.AreEqual(arr.GetType().ToString(), "System.String[]");
++ }
++
++ static void RunTest5()
++ {
++ object [] arr = new_array_generic<object>();
++
++ Assert.AreEqual(arr.GetType().ToString(), "System.Object[]");
++ }
++
++ static void RunTest6()
++ {
++ GenericClass1<int> [] arr = new GenericClass1<int>[ARRAY_SIZE];
++
++ Assert.AreEqual(arr.GetType().ToString(), "GenericClass1`1[System.Int32][]");
++ }
++
++ static void RunTest7()
++ {
++ GenericClass1<object> [] arr = new_array_generic<GenericClass1<object>>();
++
++ Assert.AreEqual(arr.GetType().ToString(), "GenericClass1`1[System.Object][]");
++ }
++
++ static void RunTest8()
++ {
++ genericclass1_object_array_field = new_array_generic<GenericClass2<object>>();
++
++ Assert.AreEqual(genericclass1_object_array_field.GetType().ToString(), "GenericClass2`1[System.Object][]");
++ }
++
++ [MethodImpl(MethodImplOptions.NoInlining)]
++ static T[] new_array_generic<T>()
++ {
++ return new T[ARRAY_SIZE];
++ }
++
++ static volatile GenericClass1<object> [] genericclass1_object_array_field;
++}
++
++class GenericClass1<T>
++{
++}
++
++class GenericClass2<T> : GenericClass1<T>
++{
++}
++
++public static class Assert
++{
++ public static bool HasAssertFired;
++
++ public static void AreEqual(Object actual, Object expected)
++ {
++ if (!(actual == null && expected == null) && !actual.Equals(expected))
++ {
++ Console.WriteLine("Not equal!");
++ Console.WriteLine("actual = " + actual.ToString());
++ Console.WriteLine("expected = " + expected.ToString());
++ HasAssertFired = true;
++ }
++ }
++}
+diff --git a/tests/src/readytorun/tests/newarray.csproj b/tests/src/readytorun/tests/newarray.csproj
+new file mode 100644
+index 0000000..21acf81
+--- /dev/null
++++ b/tests/src/readytorun/tests/newarray.csproj
+@@ -0,0 +1,32 @@
++<?xml version="1.0" encoding="utf-8"?>
++<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
++ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
++ <PropertyGroup>
++ <AssemblyName>newarray</AssemblyName>
++ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
++ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
++ <SchemaVersion>2.0</SchemaVersion>
++ <ProjectGuid>{8DDE6EB9-7CAE-4DD1-B2CC-8D756855EF78}</ProjectGuid>
++ <ProjectTypeGuids>{786C830F-07A1-408B-BD7F-6EE04809D6DB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
++ <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
++ <OutputType>Exe</OutputType>
++ <CLRTestKind>BuildAndRun</CLRTestKind>
++ <CLRTestPriority>0</CLRTestPriority>
++ </PropertyGroup>
++
++ <ItemGroup>
++ <CodeAnalysisDependentAssemblyPaths Condition=" '$(VS100COMNTOOLS)' != '' " Include="$(VS100COMNTOOLS)..\IDE\PrivateAssemblies">
++ <Visible>False</Visible>
++ </CodeAnalysisDependentAssemblyPaths>
++ </ItemGroup>
++
++ <ItemGroup>
++ <Compile Include="newarray.cs" />
++ </ItemGroup>
++
++
++ <ItemGroup>
++ <Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" />
++ </ItemGroup>
++ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" />
++</Project>
+--
+2.7.4
+
diff --git a/packaging/0014-Fix-JIT_NewArr1-8-byte-alignment-for-ELEMENT_TYPE_R8.patch b/packaging/0014-Fix-JIT_NewArr1-8-byte-alignment-for-ELEMENT_TYPE_R8.patch
new file mode 100644
index 0000000000..7bc7348d9d
--- /dev/null
+++ b/packaging/0014-Fix-JIT_NewArr1-8-byte-alignment-for-ELEMENT_TYPE_R8.patch
@@ -0,0 +1,26 @@
+From 59835bafae9faf37d770f7ab82830f7e1f2256f9 Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <r.ayrapetyan@samsung.com>
+Date: Tue, 27 Jun 2017 19:22:07 +0300
+Subject: [PATCH 14/32] Fix JIT_NewArr1 8-byte alignment for ELEMENT_TYPE_R8 on
+ x86.
+
+---
+ src/vm/jithelpers.cpp | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/src/vm/jithelpers.cpp b/src/vm/jithelpers.cpp
+index 0b51339..731b490 100644
+--- a/src/vm/jithelpers.cpp
++++ b/src/vm/jithelpers.cpp
+@@ -3187,7 +3187,7 @@ HCIMPL2(Object*, JIT_NewArr1, CORINFO_CLASS_HANDLE arrayMT, INT_PTR size)
+ // is this a primitive type?
+ //
+
+- CorElementType elemType = pArrayMT->GetInternalCorElementType();
++ CorElementType elemType = pArrayMT->GetArrayElementType();
+
+ if (CorTypeInfo::IsPrimitiveType(elemType)
+ #ifdef FEATURE_64BIT_ALIGNMENT
+--
+2.7.4
+
diff --git a/packaging/0015-Partially-remove-relocations-from-Class-section-of-N.patch b/packaging/0015-Partially-remove-relocations-from-Class-section-of-N.patch
new file mode 100644
index 0000000000..69b7fb70be
--- /dev/null
+++ b/packaging/0015-Partially-remove-relocations-from-Class-section-of-N.patch
@@ -0,0 +1,1105 @@
+From cb4f4da84d36ec631f69a1e9007035a62a6c1738 Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Wed, 28 Jun 2017 09:16:01 +0300
+Subject: [PATCH 15/32] Partially remove relocations from Class section of
+ NGEN-ed images (#11962)
+
+* Remove relocations for ParamTypeDesc::m_TemplateMT.
+
+* Remove relocations for LayoutEEClass::m_LayoutInfo.m_pFieldMarshalers.
+
+* Prepare RelativeFixupPointer.
+
+* Remove relocations for FieldMarshaler::m_pFD and FieldMarshaler_*::m_*.
+---
+ src/debug/daccess/nidump.cpp | 8 +-
+ src/inc/fixuppointer.h | 31 ++++++-
+ src/vm/ceeload.cpp | 7 +-
+ src/vm/ceeload.h | 3 +-
+ src/vm/class.cpp | 8 +-
+ src/vm/class.h | 15 +++-
+ src/vm/fieldmarshaler.cpp | 19 ++--
+ src/vm/fieldmarshaler.h | 197 ++++++++++++++++++++++++++++++++++++------
+ src/vm/methodtablebuilder.cpp | 8 +-
+ src/vm/typedesc.cpp | 22 ++---
+ src/vm/typedesc.h | 23 +++--
+ src/vm/typedesc.inl | 2 +-
+ 12 files changed, 269 insertions(+), 74 deletions(-)
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index 42705a5..2ec5d9a 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -4767,7 +4767,7 @@ void NativeImageDumper::TraverseTypeHashEntry(void *pContext, PTR_EETypeHashEntr
+ * all that much harm here (bloats m_discoveredMTs though,
+ * but not by a huge amount.
+ */
+- PTR_MethodTable mt(ptd->m_TemplateMT.GetValue());
++ PTR_MethodTable mt(ptd->GetTemplateMethodTableInternal());
+ if (isInRange(PTR_TO_TADDR(mt)))
+ {
+ m_discoveredMTs.AppendEx(mt);
+@@ -6243,7 +6243,7 @@ void NativeImageDumper::TypeDescToString( PTR_TypeDesc td, SString& buf )
+ if( td->IsArray() )
+ {
+ //td->HasTypeParam() may also be true.
+- PTR_MethodTable mt = ptd->m_TemplateMT.GetValue();
++ PTR_MethodTable mt = ptd->GetTemplateMethodTableInternal();
+ _ASSERTE( PTR_TO_TADDR(mt) );
+ if( CORCOMPILE_IS_POINTER_TAGGED(PTR_TO_TADDR(mt)) )
+ {
+@@ -8493,7 +8493,7 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ VERBOSE_TYPES );
+ DisplayWriteFieldInt( m_numCTMFields, eecli->m_numCTMFields,
+ EEClassLayoutInfo, VERBOSE_TYPES );
+- PTR_FieldMarshaler fmArray( TO_TADDR(eecli->m_pFieldMarshalers) );
++ PTR_FieldMarshaler fmArray = eecli->GetFieldMarshalers();
+ DisplayWriteFieldAddress( m_pFieldMarshalers,
+ DPtrToPreferredAddr(fmArray),
+ eecli->m_numCTMFields
+@@ -8840,7 +8840,7 @@ void NativeImageDumper::DumpTypeDesc( PTR_TypeDesc td )
+ {
+ PTR_ParamTypeDesc ptd(td);
+ DisplayStartVStructure( "ParamTypeDesc", TYPEDESCS );
+- WriteFieldMethodTable( m_TemplateMT, ptd->m_TemplateMT.GetValue(),
++ WriteFieldMethodTable( m_TemplateMT, ptd->GetTemplateMethodTableInternal(),
+ ParamTypeDesc, TYPEDESCS );
+ WriteFieldTypeHandle( m_Arg, ptd->m_Arg,
+ ParamTypeDesc, TYPEDESCS );
+diff --git a/src/inc/fixuppointer.h b/src/inc/fixuppointer.h
+index 38ae348..83ff20e 100644
+--- a/src/inc/fixuppointer.h
++++ b/src/inc/fixuppointer.h
+@@ -249,6 +249,15 @@ public:
+ static constexpr bool isRelative = true;
+ typedef PTR_TYPE type;
+
++#ifndef DACCESS_COMPILE
++ RelativeFixupPointer()
++ {
++ SetValueMaybeNull(NULL);
++ }
++#else // DACCESS_COMPILE
++ RelativeFixupPointer() =delete;
++#endif // DACCESS_COMPILE
++
+ // Implicit copy/move is not allowed
+ RelativeFixupPointer<PTR_TYPE>(const RelativeFixupPointer<PTR_TYPE> &) =delete;
+ RelativeFixupPointer<PTR_TYPE>(RelativeFixupPointer<PTR_TYPE> &&) =delete;
+@@ -273,6 +282,15 @@ public:
+ return FALSE;
+ }
+
++#ifndef DACCESS_COMPILE
++ FORCEINLINE BOOL IsTagged() const
++ {
++ LIMITED_METHOD_CONTRACT;
++ TADDR base = (TADDR) this;
++ return IsTagged(base);
++ }
++#endif // !DACCESS_COMPILE
++
+ // Returns value of the encoded pointer. Assumes that the pointer is not NULL.
+ FORCEINLINE PTR_TYPE GetValue(TADDR base) const
+ {
+@@ -343,7 +361,7 @@ public:
+ {
+ LIMITED_METHOD_CONTRACT;
+ PRECONDITION(addr != NULL);
+- m_delta = (TADDR)addr - (TADDR)this;
++ m_delta = dac_cast<TADDR>(addr) - (TADDR)this;
+ }
+
+ // Set encoded value of the pointer. The value can be NULL.
+@@ -353,7 +371,7 @@ public:
+ if (addr == NULL)
+ m_delta = NULL;
+ else
+- m_delta = (TADDR)addr - (TADDR)base;
++ m_delta = dac_cast<TADDR>(addr) - (TADDR)base;
+ }
+
+ // Set encoded value of the pointer. The value can be NULL.
+@@ -373,6 +391,15 @@ public:
+ return dac_cast<DPTR(PTR_TYPE)>(addr - FIXUP_POINTER_INDIRECTION);
+ }
+
++#ifndef DACCESS_COMPILE
++ PTR_TYPE * GetValuePtr() const
++ {
++ LIMITED_METHOD_CONTRACT;
++ TADDR base = (TADDR) this;
++ return GetValuePtr(base);
++ }
++#endif // !DACCESS_COMPILE
++
+ // Returns value of the encoded pointer. Assumes that the pointer is not NULL.
+ // Allows the value to be tagged.
+ FORCEINLINE TADDR GetValueMaybeTagged(TADDR base) const
+diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
+index f995343..cd40ad7 100644
+--- a/src/vm/ceeload.cpp
++++ b/src/vm/ceeload.cpp
+@@ -10749,7 +10749,7 @@ void Module::RestoreMethodDescPointer(RelativeFixupPointer<PTR_MethodDesc> * ppM
+ }
+
+ /*static*/
+-void Module::RestoreFieldDescPointer(FixupPointer<PTR_FieldDesc> * ppFD)
++void Module::RestoreFieldDescPointer(RelativeFixupPointer<PTR_FieldDesc> * ppFD)
+ {
+ CONTRACTL
+ {
+@@ -10759,6 +10759,9 @@ void Module::RestoreFieldDescPointer(FixupPointer<PTR_FieldDesc> * ppFD)
+ }
+ CONTRACTL_END;
+
++ if (!ppFD->IsTagged())
++ return;
++
+ PTR_FieldDesc * ppValue = ppFD->GetValuePtr();
+
+ // Ensure that the compiler won't fetch the value twice
+@@ -10770,7 +10773,7 @@ void Module::RestoreFieldDescPointer(FixupPointer<PTR_FieldDesc> * ppFD)
+ CONSISTENCY_CHECK((CORCOMPILE_UNTAG_TOKEN(fixup)>>32) == 0);
+ #endif
+
+- Module * pContainingModule = ExecutionManager::FindZapModule(dac_cast<TADDR>(ppValue));
++ Module * pContainingModule = ExecutionManager::FindZapModule((TADDR)ppValue);
+ PREFIX_ASSUME(pContainingModule != NULL);
+
+ RVA fixupRva = (RVA) CORCOMPILE_UNTAG_TOKEN(fixup);
+diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
+index dc21eec..fa61089 100644
+--- a/src/vm/ceeload.h
++++ b/src/vm/ceeload.h
+@@ -2904,8 +2904,7 @@ public:
+ static void RestoreMethodDescPointer(RelativeFixupPointer<PTR_MethodDesc> * ppMD,
+ Module *pContainingModule = NULL,
+ ClassLoadLevel level = CLASS_LOADED);
+-
+- static void RestoreFieldDescPointer(FixupPointer<PTR_FieldDesc> * ppFD);
++ static void RestoreFieldDescPointer(RelativeFixupPointer<PTR_FieldDesc> * ppFD);
+
+ static void RestoreModulePointer(RelativeFixupPointer<PTR_Module> * ppModule, Module *pContainingModule);
+
+diff --git a/src/vm/class.cpp b/src/vm/class.cpp
+index 0259b1e..6697b23 100644
+--- a/src/vm/class.cpp
++++ b/src/vm/class.cpp
+@@ -2818,13 +2818,13 @@ void EEClass::Save(DataImage *image, MethodTable *pMT)
+
+ if (pInfo->m_numCTMFields > 0)
+ {
+- ZapStoredStructure * pNode = image->StoreStructure(pInfo->m_pFieldMarshalers,
++ ZapStoredStructure * pNode = image->StoreStructure(pInfo->GetFieldMarshalers(),
+ pInfo->m_numCTMFields * MAXFIELDMARSHALERSIZE,
+ DataImage::ITEM_FIELD_MARSHALERS);
+
+ for (UINT iField = 0; iField < pInfo->m_numCTMFields; iField++)
+ {
+- FieldMarshaler *pFM = (FieldMarshaler*)((BYTE *)pInfo->m_pFieldMarshalers + iField * MAXFIELDMARSHALERSIZE);
++ FieldMarshaler *pFM = (FieldMarshaler*)((BYTE *)pInfo->GetFieldMarshalers() + iField * MAXFIELDMARSHALERSIZE);
+ pFM->Save(image);
+
+ if (iField > 0)
+@@ -3029,11 +3029,11 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+
+ if (HasLayout())
+ {
+- image->FixupPointerField(this, offsetof(LayoutEEClass, m_LayoutInfo.m_pFieldMarshalers));
++ image->FixupRelativePointerField(this, offsetof(LayoutEEClass, m_LayoutInfo.m_pFieldMarshalers));
+
+ EEClassLayoutInfo *pInfo = &((LayoutEEClass*)this)->m_LayoutInfo;
+
+- FieldMarshaler *pFM = pInfo->m_pFieldMarshalers;
++ FieldMarshaler *pFM = pInfo->GetFieldMarshalers();
+ FieldMarshaler *pFMEnd = (FieldMarshaler*) ((BYTE *)pFM + pInfo->m_numCTMFields*MAXFIELDMARSHALERSIZE);
+ while (pFM < pFMEnd)
+ {
+diff --git a/src/vm/class.h b/src/vm/class.h
+index 13b2e50..1d5f9a2 100644
+--- a/src/vm/class.h
++++ b/src/vm/class.h
+@@ -110,6 +110,7 @@ class LoaderAllocator;
+ class ComCallWrapperTemplate;
+
+ typedef DPTR(DictionaryLayout) PTR_DictionaryLayout;
++typedef DPTR(FieldMarshaler) PTR_FieldMarshaler;
+
+
+ //---------------------------------------------------------------------------------
+@@ -440,7 +441,7 @@ class EEClassLayoutInfo
+ // An array of FieldMarshaler data blocks, used to drive call-time
+ // marshaling of NStruct reference parameters. The number of elements
+ // equals m_numCTMFields.
+- FieldMarshaler *m_pFieldMarshalers;
++ RelativePointer<PTR_FieldMarshaler> m_pFieldMarshalers;
+
+
+ public:
+@@ -469,12 +470,20 @@ class EEClassLayoutInfo
+ return m_numCTMFields;
+ }
+
+- FieldMarshaler *GetFieldMarshalers() const
++ PTR_FieldMarshaler GetFieldMarshalers() const
+ {
+ LIMITED_METHOD_CONTRACT;
+- return m_pFieldMarshalers;
++ return ReadPointerMaybeNull(this, &EEClassLayoutInfo::m_pFieldMarshalers);
+ }
+
++#ifndef DACCESS_COMPILE
++ void SetFieldMarshalers(FieldMarshaler *pFieldMarshallers)
++ {
++ LIMITED_METHOD_CONTRACT;
++ m_pFieldMarshalers.SetValueMaybeNull(pFieldMarshallers);
++ }
++#endif // DACCESS_COMPILE
++
+ BOOL IsBlittable() const
+ {
+ LIMITED_METHOD_CONTRACT;
+diff --git a/src/vm/fieldmarshaler.cpp b/src/vm/fieldmarshaler.cpp
+index 0de71b5..9415b94 100644
+--- a/src/vm/fieldmarshaler.cpp
++++ b/src/vm/fieldmarshaler.cpp
+@@ -1318,7 +1318,7 @@ VOID EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing(
+ }
+
+ pEEClassLayoutInfoOut->m_numCTMFields = fHasNonTrivialParent ? pParentMT->GetLayoutInfo()->m_numCTMFields : 0;
+- pEEClassLayoutInfoOut->m_pFieldMarshalers = NULL;
++ pEEClassLayoutInfoOut->SetFieldMarshalers(NULL);
+ pEEClassLayoutInfoOut->SetIsBlittable(TRUE);
+ if (fHasNonTrivialParent)
+ pEEClassLayoutInfoOut->SetIsBlittable(pParentMT->IsBlittable());
+@@ -1599,7 +1599,7 @@ VOID EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing(
+
+ if (pEEClassLayoutInfoOut->m_numCTMFields)
+ {
+- pEEClassLayoutInfoOut->m_pFieldMarshalers = (FieldMarshaler*)(pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(MAXFIELDMARSHALERSIZE) * S_SIZE_T(pEEClassLayoutInfoOut->m_numCTMFields))));
++ pEEClassLayoutInfoOut->SetFieldMarshalers((FieldMarshaler*)(pamTracker->Track(pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(MAXFIELDMARSHALERSIZE) * S_SIZE_T(pEEClassLayoutInfoOut->m_numCTMFields)))));
+
+ // Bring in the parent's fieldmarshalers
+ if (fHasNonTrivialParent)
+@@ -1608,8 +1608,8 @@ VOID EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing(
+ PREFAST_ASSUME(pParentLayoutInfo != NULL); // See if (fParentHasLayout) branch above
+
+ UINT numChildCTMFields = pEEClassLayoutInfoOut->m_numCTMFields - pParentLayoutInfo->m_numCTMFields;
+- memcpyNoGCRefs( ((BYTE*)pEEClassLayoutInfoOut->m_pFieldMarshalers) + MAXFIELDMARSHALERSIZE*numChildCTMFields,
+- pParentLayoutInfo->m_pFieldMarshalers,
++ memcpyNoGCRefs( ((BYTE*)pEEClassLayoutInfoOut->GetFieldMarshalers()) + MAXFIELDMARSHALERSIZE*numChildCTMFields,
++ pParentLayoutInfo->GetFieldMarshalers(),
+ MAXFIELDMARSHALERSIZE * (pParentLayoutInfo->m_numCTMFields) );
+ }
+
+@@ -3726,7 +3726,7 @@ VOID FieldMarshaler_SafeArray::UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNa
+ pSafeArray = (LPSAFEARRAY*)pNativeValue;
+
+ VARTYPE vt = m_vt;
+- MethodTable* pMT = m_pMT.GetValue();
++ MethodTable* pMT = m_pMT.GetValueMaybeNull();
+
+ GCPROTECT_BEGIN(pArray)
+ {
+@@ -3771,7 +3771,7 @@ VOID FieldMarshaler_SafeArray::UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF
+ }
+
+ VARTYPE vt = m_vt;
+- MethodTable* pMT = m_pMT.GetValue();
++ MethodTable* pMT = m_pMT.GetValueMaybeNull();
+
+ // If we have an empty vartype, get it from the safearray vartype
+ if (vt == VT_EMPTY)
+@@ -4868,3 +4868,10 @@ IMPLEMENT_FieldMarshaler_METHOD(void, Restore,
+ (),
+ ,
+ ())
++
++#ifndef DACCESS_COMPILE
++IMPLEMENT_FieldMarshaler_METHOD(VOID, CopyTo,
++ (VOID *pDest, SIZE_T destSize) const,
++ ,
++ (pDest, destSize))
++#endif // !DACCESS_COMPILE
+diff --git a/src/vm/fieldmarshaler.h b/src/vm/fieldmarshaler.h
+index 287da41..f11c81b 100644
+--- a/src/vm/fieldmarshaler.h
++++ b/src/vm/fieldmarshaler.h
+@@ -253,6 +253,28 @@ VOID FmtValueTypeUpdateCLR(LPVOID pProtectedManagedData, MethodTable *pMT, BYTE
+ } \
+ ELEMENT_SIZE_IMPL(NativeSize, AlignmentReq)
+
++#define COPY_TO_IMPL_BASE_STRUCT_ONLY() \
++ VOID CopyToImpl(VOID *pDest, SIZE_T destSize) \
++ { \
++ static_assert(sizeof(*this) == sizeof(FieldMarshaler), \
++ "Please, implement CopyToImpl for correct copy of field values"); \
++ \
++ FieldMarshaler::CopyToImpl(pDest, destSize); \
++ }
++
++#define START_COPY_TO_IMPL(CLASS_NAME) \
++ VOID CopyToImpl(VOID *pDest, SIZE_T destSize) const \
++ { \
++ FieldMarshaler::CopyToImpl(pDest, destSize); \
++ \
++ CLASS_NAME *pDestFieldMarshaller = (std::remove_const<std::remove_pointer<decltype(this)>::type>::type *) pDest; \
++ _ASSERTE(sizeof(*pDestFieldMarshaller) <= destSize); \
++
++#define END_COPY_TO_IMPL(CLASS_NAME) \
++ static_assert(std::is_same<CLASS_NAME *, decltype(pDestFieldMarshaller)>::value, \
++ "Structure's name is required"); \
++ }
++
+
+ //=======================================================================
+ //
+@@ -278,6 +300,7 @@ public:
+ VOID ScalarUpdateCLR(const VOID *pNative, LPVOID pCLR) const;
+ VOID NestedValueClassUpdateNative(const VOID **ppProtectedCLR, SIZE_T startoffset, LPVOID pNative, OBJECTREF *ppCleanupWorkListOnStack) const;
+ VOID NestedValueClassUpdateCLR(const VOID *pNative, LPVOID *ppProtectedCLR, SIZE_T startoffset) const;
++ VOID CopyTo(VOID *pDest, SIZE_T destSize) const;
+ #ifdef FEATURE_PREJIT
+ void Save(DataImage *image);
+ void Fixup(DataImage *image);
+@@ -351,10 +374,21 @@ public:
+ #endif // FEATURE_PREJIT
+ }
+
++ void CopyToImpl(VOID *pDest, SIZE_T destSize) const
++ {
++ FieldMarshaler *pDestFieldMarshaller = (FieldMarshaler *) pDest;
++
++ _ASSERTE(sizeof(*pDestFieldMarshaller) <= destSize);
++
++ pDestFieldMarshaller->SetFieldDesc(GetFieldDesc());
++ pDestFieldMarshaller->SetExternalOffset(GetExternalOffset());
++ pDestFieldMarshaller->SetNStructFieldType(GetNStructFieldType());
++ }
++
+ void SetFieldDesc(FieldDesc* pFD)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pFD.SetValue(pFD);
++ m_pFD.SetValueMaybeNull(pFD);
+ }
+
+ FieldDesc* GetFieldDesc() const
+@@ -369,7 +403,7 @@ public:
+ }
+ CONTRACT_END;
+
+- RETURN m_pFD.GetValue();
++ RETURN m_pFD.GetValueMaybeNull();
+ }
+
+ void SetExternalOffset(UINT32 dwExternalOffset)
+@@ -394,7 +428,7 @@ protected:
+ #endif
+ }
+
+- static inline void RestoreHelper(FixupPointer<PTR_MethodTable> *ppMT)
++ static inline void RestoreHelper(RelativeFixupPointer<PTR_MethodTable> *ppMT)
+ {
+ CONTRACTL
+ {
+@@ -414,7 +448,7 @@ protected:
+ }
+
+ #ifdef _DEBUG
+- static inline BOOL IsRestoredHelper(FixupPointer<PTR_MethodTable> pMT)
++ static inline BOOL IsRestoredHelper(const RelativeFixupPointer<PTR_MethodTable> &pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+
+@@ -428,7 +462,7 @@ protected:
+ #endif // _DEBUG
+
+
+- FixupPointer<PTR_FieldDesc> m_pFD; // FieldDesc
++ RelativeFixupPointer<PTR_FieldDesc> m_pFD; // FieldDesc
+ UINT32 m_dwExternalOffset; // offset of field in the fixed portion
+ NStructFieldType m_nft;
+ };
+@@ -449,6 +483,7 @@ public:
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(BSTR), sizeof(BSTR))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ //=======================================================================
+@@ -462,6 +497,7 @@ public:
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(HSTRING), sizeof(HSTRING))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ //=======================================================================
+@@ -473,7 +509,7 @@ public:
+
+ FieldMarshaler_Nullable(MethodTable* pMT)
+ {
+- m_pNullableTypeMT.SetValue(pMT);
++ m_pNullableTypeMT.SetValueMaybeNull(pMT);
+ }
+
+ BOOL IsNullableMarshalerImpl() const
+@@ -526,6 +562,12 @@ public:
+ FieldMarshaler::RestoreImpl();
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_Nullable)
++ {
++ pDestFieldMarshaller->m_pNullableTypeMT.SetValueMaybeNull(GetMethodTable());
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_Nullable)
++
+ #ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+@@ -550,7 +592,7 @@ public:
+ }
+
+ private:
+- FixupPointer<PTR_MethodTable> m_pNullableTypeMT;
++ RelativeFixupPointer<PTR_MethodTable> m_pNullableTypeMT;
+ };
+
+
+@@ -565,6 +607,7 @@ public:
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(HSTRING), sizeof(HSTRING))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ //=======================================================================
+@@ -578,6 +621,7 @@ public:
+ VOID UpdateCLRImpl(const VOID * pNativeValue, OBJECTREF * ppProtectedCLRValue, OBJECTREF * ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(HRESULT), sizeof(HRESULT))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ #endif // FEATURE_COMINTEROP
+@@ -593,7 +637,7 @@ public:
+ FieldMarshaler_NestedLayoutClass(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+- m_pNestedMethodTable.SetValue(pMT);
++ m_pNestedMethodTable.SetValueMaybeNull(pMT);
+ }
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+@@ -629,6 +673,12 @@ public:
+ FieldMarshaler::RestoreImpl();
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_NestedLayoutClass)
++ {
++ pDestFieldMarshaller->m_pNestedMethodTable.SetValueMaybeNull(GetMethodTable());
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_NestedLayoutClass)
++
+ #ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+@@ -649,12 +699,12 @@ public:
+ }
+ CONTRACTL_END;
+
+- return m_pNestedMethodTable.GetValue();
++ return m_pNestedMethodTable.GetValueMaybeNull();
+ }
+
+ private:
+ // MethodTable of nested FieldMarshaler.
+- FixupPointer<PTR_MethodTable> m_pNestedMethodTable;
++ RelativeFixupPointer<PTR_MethodTable> m_pNestedMethodTable;
+ };
+
+
+@@ -667,7 +717,7 @@ public:
+ FieldMarshaler_NestedValueClass(MethodTable *pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+- m_pNestedMethodTable.SetValue(pMT);
++ m_pNestedMethodTable.SetValueMaybeNull(pMT);
+ }
+
+ BOOL IsNestedValueClassMarshalerImpl() const
+@@ -712,6 +762,12 @@ public:
+ FieldMarshaler::RestoreImpl();
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_NestedValueClass)
++ {
++ pDestFieldMarshaller->m_pNestedMethodTable.SetValueMaybeNull(GetMethodTable());
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_NestedValueClass)
++
+ #ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+@@ -738,13 +794,13 @@ public:
+ }
+ CONTRACTL_END;
+
+- return m_pNestedMethodTable.GetValue();
++ return m_pNestedMethodTable.GetValueMaybeNull();
+ }
+
+
+ private:
+ // MethodTable of nested NStruct.
+- FixupPointer<PTR_MethodTable> m_pNestedMethodTable;
++ RelativeFixupPointer<PTR_MethodTable> m_pNestedMethodTable;
+ };
+
+
+@@ -760,6 +816,7 @@ public:
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPWSTR), sizeof(LPWSTR))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ //=======================================================================
+@@ -774,6 +831,7 @@ public:
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPSTR), sizeof(LPSTR))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ //=======================================================================
+@@ -806,6 +864,13 @@ public:
+ return m_ThrowOnUnmappableChar;
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_StringAnsi)
++ {
++ pDestFieldMarshaller->m_BestFitMap = m_BestFitMap;
++ pDestFieldMarshaller->m_ThrowOnUnmappableChar = m_ThrowOnUnmappableChar;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_StringAnsi)
++
+ private:
+ bool m_BestFitMap:1;
+ bool m_ThrowOnUnmappableChar:1;
+@@ -829,6 +894,12 @@ public:
+ m_numchar = numChar;
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_FixedStringUni)
++ {
++ pDestFieldMarshaller->m_numchar = m_numchar;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_FixedStringUni)
++
+ private:
+ // # of characters for fixed strings
+ UINT32 m_numchar;
+@@ -864,6 +935,14 @@ public:
+ return m_ThrowOnUnmappableChar;
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_FixedStringAnsi)
++ {
++ pDestFieldMarshaller->m_numchar = m_numchar;
++ pDestFieldMarshaller->m_BestFitMap = m_BestFitMap;
++ pDestFieldMarshaller->m_ThrowOnUnmappableChar = m_ThrowOnUnmappableChar;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_FixedStringAnsi)
++
+ private:
+ // # of characters for fixed strings
+ UINT32 m_numchar;
+@@ -901,6 +980,14 @@ public:
+ return m_ThrowOnUnmappableChar;
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_FixedCharArrayAnsi)
++ {
++ pDestFieldMarshaller->m_numElems = m_numElems;
++ pDestFieldMarshaller->m_BestFitMap = m_BestFitMap;
++ pDestFieldMarshaller->m_ThrowOnUnmappableChar = m_ThrowOnUnmappableChar;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_FixedCharArrayAnsi)
++
+ private:
+ // # of elements for fixedchararray
+ UINT32 m_numElems;
+@@ -980,6 +1067,16 @@ public:
+ FieldMarshaler::RestoreImpl();
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_FixedArray)
++ {
++ pDestFieldMarshaller->m_arrayType.SetValueMaybeNull(m_arrayType.GetValueMaybeNull());
++ pDestFieldMarshaller->m_numElems = m_numElems;
++ pDestFieldMarshaller->m_vt = m_vt;
++ pDestFieldMarshaller->m_BestFitMap = m_BestFitMap;
++ pDestFieldMarshaller->m_ThrowOnUnmappableChar = m_ThrowOnUnmappableChar;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_FixedArray)
++
+ #ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+@@ -994,7 +1091,7 @@ public:
+ #endif
+
+ private:
+- FixupPointer<TypeHandle> m_arrayType;
++ RelativeFixupPointer<TypeHandle> m_arrayType;
+ UINT32 m_numElems;
+ VARTYPE m_vt;
+ bool m_BestFitMap:1; // Note: deliberately use small bools to save on working set - this is the largest FieldMarshaler and dominates the cost of the FieldMarshaler array
+@@ -1020,7 +1117,7 @@ public:
+ {
+ WRAPPER_NO_CONTRACT;
+ m_vt = vt;
+- m_pMT.SetValue(pMT);
++ m_pMT.SetValueMaybeNull(pMT);
+ }
+
+ #ifdef FEATURE_PREJIT
+@@ -1049,6 +1146,13 @@ public:
+ FieldMarshaler::RestoreImpl();
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_SafeArray)
++ {
++ pDestFieldMarshaller->m_pMT.SetValueMaybeNull(m_pMT.GetValueMaybeNull());
++ pDestFieldMarshaller->m_vt = m_vt;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_SafeArray)
++
+ #ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+@@ -1079,7 +1183,7 @@ public:
+ }
+
+ private:
+- FixupPointer<PTR_MethodTable> m_pMT;
++ RelativeFixupPointer<PTR_MethodTable> m_pMT;
+ VARTYPE m_vt;
+ };
+ #endif //FEATURE_CLASSIC_COMINTEROP
+@@ -1094,7 +1198,7 @@ public:
+ FieldMarshaler_Delegate(MethodTable* pMT)
+ {
+ WRAPPER_NO_CONTRACT;
+- m_pNestedMethodTable.SetValue(pMT);
++ m_pNestedMethodTable.SetValueMaybeNull(pMT);
+ }
+
+ VOID UpdateNativeImpl(OBJECTREF* pCLRValue, LPVOID pNativeValue, OBJECTREF *ppCleanupWorkListOnStack) const;
+@@ -1128,6 +1232,12 @@ public:
+ FieldMarshaler::RestoreImpl();
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_Delegate)
++ {
++ pDestFieldMarshaller->m_pNestedMethodTable.SetValueMaybeNull(m_pNestedMethodTable.GetValueMaybeNull());
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_Delegate)
++
+ #ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+@@ -1148,10 +1258,10 @@ public:
+ }
+ CONTRACTL_END;
+
+- return m_pNestedMethodTable.GetValue();
++ return m_pNestedMethodTable.GetValueMaybeNull();
+ }
+
+- FixupPointer<PTR_MethodTable> m_pNestedMethodTable;
++ RelativeFixupPointer<PTR_MethodTable> m_pNestedMethodTable;
+ };
+
+
+@@ -1168,6 +1278,7 @@ public:
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPVOID), sizeof(LPVOID))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+
+@@ -1184,6 +1295,7 @@ public:
+ VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(LPVOID), sizeof(LPVOID))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ #ifdef FEATURE_COMINTEROP
+@@ -1204,8 +1316,8 @@ public:
+ FieldMarshaler_Interface(MethodTable *pClassMT, MethodTable *pItfMT, DWORD dwFlags)
+ {
+ WRAPPER_NO_CONTRACT;
+- m_pClassMT.SetValue(pClassMT);
+- m_pItfMT.SetValue(pItfMT);
++ m_pClassMT.SetValueMaybeNull(pClassMT);
++ m_pItfMT.SetValueMaybeNull(pItfMT);
+ m_dwFlags = dwFlags;
+ }
+
+@@ -1237,6 +1349,14 @@ public:
+ FieldMarshaler::RestoreImpl();
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_Interface)
++ {
++ pDestFieldMarshaller->m_pClassMT.SetValueMaybeNull(m_pClassMT.GetValueMaybeNull());
++ pDestFieldMarshaller->m_pItfMT.SetValueMaybeNull(m_pItfMT.GetValueMaybeNull());
++ pDestFieldMarshaller->m_dwFlags = m_dwFlags;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_Interface)
++
+ #ifdef _DEBUG
+ BOOL IsRestored() const
+ {
+@@ -1275,7 +1395,7 @@ public:
+ }
+ CONTRACTL_END;
+
+- return m_pClassMT.GetValue();
++ return m_pClassMT.GetValueMaybeNull();
+ }
+
+ MethodTable *GetInterfaceMethodTable() const
+@@ -1289,12 +1409,12 @@ public:
+ }
+ CONTRACTL_END;
+
+- return m_pItfMT.GetValue();
++ return m_pItfMT.GetValueMaybeNull();
+ }
+
+ private:
+- FixupPointer<PTR_MethodTable> m_pClassMT;
+- FixupPointer<PTR_MethodTable> m_pItfMT;
++ RelativeFixupPointer<PTR_MethodTable> m_pClassMT;
++ RelativeFixupPointer<PTR_MethodTable> m_pItfMT;
+ DWORD m_dwFlags;
+ };
+
+@@ -1328,6 +1448,7 @@ public:
+ VOID DestroyNativeImpl(LPVOID pNativeValue) const;
+
+ ELEMENT_SIZE_IMPL(sizeof(VARIANT), 8)
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+ };
+
+ #endif // FEATURE_COMINTEROP
+@@ -1352,7 +1473,13 @@ public:
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+
+ SCALAR_MARSHALER_IMPL(1, 1)
+-
++
++ START_COPY_TO_IMPL(FieldMarshaler_Illegal)
++ {
++ pDestFieldMarshaller->m_resIDWhy = m_resIDWhy;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_Illegal)
++
+ private:
+ UINT m_resIDWhy;
+ };
+@@ -1369,6 +1496,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(1, 1)
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1413,6 +1541,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(2, 2)
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1456,6 +1585,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(4, 4)
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1499,6 +1629,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(8, 8)
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1599,6 +1730,13 @@ public:
+ return m_ThrowOnUnmappableChar;
+ }
+
++ START_COPY_TO_IMPL(FieldMarshaler_Ansi)
++ {
++ pDestFieldMarshaller->m_BestFitMap = m_BestFitMap;
++ pDestFieldMarshaller->m_ThrowOnUnmappableChar = m_ThrowOnUnmappableChar;
++ }
++ END_COPY_TO_IMPL(FieldMarshaler_Ansi)
++
+ private:
+ bool m_BestFitMap:1;
+ bool m_ThrowOnUnmappableChar:1;
+@@ -1614,6 +1752,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(BOOL), sizeof(BOOL))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1661,6 +1800,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(VARIANT_BOOL), sizeof(VARIANT_BOOL))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1711,6 +1851,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(1, 1)
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1752,6 +1893,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(DECIMAL), 8);
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const
+ {
+@@ -1793,6 +1935,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(DATE), sizeof(DATE))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+@@ -1811,6 +1954,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(CURRENCY), sizeof(CURRENCY))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+@@ -1825,6 +1969,7 @@ public:
+ UNUSED_METHOD_IMPL(VOID UpdateCLRImpl(const VOID *pNativeValue, OBJECTREF *ppProtectedCLRValue, OBJECTREF *ppProtectedOldCLRValue) const)
+
+ SCALAR_MARSHALER_IMPL(sizeof(INT64), sizeof(INT64))
++ COPY_TO_IMPL_BASE_STRUCT_ONLY()
+
+ VOID ScalarUpdateNativeImpl(LPVOID pCLR, LPVOID pNative) const;
+ VOID ScalarUpdateCLRImpl(const VOID *pNative, LPVOID pCLR) const;
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index a1e9095..e8f3b9c 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -4206,11 +4206,11 @@ VOID MethodTableBuilder::InitializeFieldDescs(FieldDesc *pFieldDescList,
+ {
+ if (pwalk->m_MD == bmtMetaData->pFields[i])
+ {
+-
+ pLayoutFieldInfo = pwalk;
+- CopyMemory(pNextFieldMarshaler,
+- &(pwalk->m_FieldMarshaler),
+- MAXFIELDMARSHALERSIZE);
++
++ const FieldMarshaler *pSrcFieldMarshaler = (const FieldMarshaler *) &pwalk->m_FieldMarshaler;
++
++ pSrcFieldMarshaler->CopyTo(pNextFieldMarshaler, MAXFIELDMARSHALERSIZE);
+
+ pNextFieldMarshaler->SetFieldDesc(pFD);
+ pNextFieldMarshaler->SetExternalOffset(pwalk->m_offset);
+diff --git a/src/vm/typedesc.cpp b/src/vm/typedesc.cpp
+index 6718068..7da1c84 100644
+--- a/src/vm/typedesc.cpp
++++ b/src/vm/typedesc.cpp
+@@ -40,7 +40,7 @@ BOOL ParamTypeDesc::Verify() {
+ STATIC_CONTRACT_DEBUG_ONLY;
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+- _ASSERTE(m_TemplateMT.IsNull() || m_TemplateMT.GetValue()->SanityCheck());
++ _ASSERTE(m_TemplateMT.IsNull() || GetTemplateMethodTableInternal()->SanityCheck());
+ _ASSERTE(!GetTypeParam().IsNull());
+ BAD_FORMAT_NOTHROW_ASSERT(GetTypeParam().IsTypeDesc() || !GetTypeParam().AsMethodTable()->IsArray());
+ BAD_FORMAT_NOTHROW_ASSERT(CorTypeInfo::IsModifier_NoThrow(GetInternalCorElementType()) ||
+@@ -59,7 +59,7 @@ BOOL ArrayTypeDesc::Verify() {
+ STATIC_CONTRACT_SUPPORTS_DAC;
+
+ // m_TemplateMT == 0 may be null when building types involving TypeVarTypeDesc's
+- BAD_FORMAT_NOTHROW_ASSERT(m_TemplateMT.IsNull() || m_TemplateMT.GetValue()->IsArray());
++ BAD_FORMAT_NOTHROW_ASSERT(m_TemplateMT.IsNull() || GetTemplateMethodTable()->IsArray());
+ BAD_FORMAT_NOTHROW_ASSERT(CorTypeInfo::IsArray_NoThrow(GetInternalCorElementType()));
+ ParamTypeDesc::Verify();
+ return(true);
+@@ -844,7 +844,7 @@ OBJECTREF ParamTypeDesc::GetManagedClassObject()
+ if (OwnsTemplateMethodTable())
+ {
+ // Set the handle on template methodtable as well to make Object.GetType for arrays take the fast path
+- EnsureWritablePages(m_TemplateMT.GetValue()->GetWriteableDataForWrite())->m_hExposedClassObject = m_hExposedClassObject;
++ EnsureWritablePages(GetTemplateMethodTableInternal()->GetWriteableDataForWrite())->m_hExposedClassObject = m_hExposedClassObject;
+ }
+
+ // Log the TypeVarTypeDesc access
+@@ -1011,7 +1011,7 @@ void TypeDesc::DoFullyLoad(Generics::RecursionGraph *pVisited, ClassLoadLevel le
+ // Fully load the template method table
+ if (!pPTD->m_TemplateMT.IsNull())
+ {
+- pPTD->m_TemplateMT.GetValue()->DoFullyLoad(&newVisited, level, pPending, &fBailed, pInstContext);
++ pPTD->GetTemplateMethodTableInternal()->DoFullyLoad(&newVisited, level, pPending, &fBailed, pInstContext);
+ }
+ }
+
+@@ -1189,8 +1189,8 @@ void ParamTypeDesc::Save(DataImage *image)
+ if (OwnsTemplateMethodTable())
+ {
+ // This TypeDesc should be the only one saving this MT
+- _ASSERTE(!image->IsStored(m_TemplateMT.GetValue()));
+- Module::SaveMethodTable(image, m_TemplateMT.GetValue(), 0);
++ _ASSERTE(!image->IsStored(GetTemplateMethodTableInternal()));
++ Module::SaveMethodTable(image, GetTemplateMethodTableInternal(), 0);
+ }
+
+ }
+@@ -1219,8 +1219,8 @@ void ParamTypeDesc::Fixup(DataImage *image)
+ // TypeDesc and the MT are "tightly-knit") In other words if one is present in
+ // an NGEN image then then other will be, and if one is "used" at runtime then
+ // the other will be too.
+- image->FixupPointerField(this, offsetof(ParamTypeDesc, m_TemplateMT));
+- m_TemplateMT.GetValue()->Fixup(image);
++ image->FixupMethodTablePointer(this, &m_TemplateMT);
++ GetTemplateMethodTableInternal()->Fixup(image);
+ }
+ else
+ {
+@@ -1275,14 +1275,14 @@ BOOL ParamTypeDesc::ComputeNeedsRestore(DataImage *image, TypeHandleList *pVisit
+ {
+ if (OwnsTemplateMethodTable())
+ {
+- if (m_TemplateMT.GetValue()->ComputeNeedsRestore(image, pVisited))
++ if (GetTemplateMethodTableInternal()->ComputeNeedsRestore(image, pVisited))
+ {
+ res = TRUE;
+ }
+ }
+ else
+ {
+- if (!image->CanPrerestoreEagerBindToMethodTable(m_TemplateMT.GetValue(), pVisited))
++ if (!image->CanPrerestoreEagerBindToMethodTable(GetTemplateMethodTableInternal(), pVisited))
+ {
+ res = TRUE;
+ }
+@@ -2419,7 +2419,7 @@ ParamTypeDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+- PTR_MethodTable pTemplateMT = m_TemplateMT.GetValue();
++ PTR_MethodTable pTemplateMT = GetTemplateMethodTableInternal();
+ if (pTemplateMT.IsValid())
+ {
+ pTemplateMT->EnumMemoryRegions(flags);
+diff --git a/src/vm/typedesc.h b/src/vm/typedesc.h
+index a8b1c25..3e8b0e6 100644
+--- a/src/vm/typedesc.h
++++ b/src/vm/typedesc.h
+@@ -254,7 +254,7 @@ public:
+
+ LIMITED_METHOD_CONTRACT;
+
+- m_TemplateMT.SetValue(pMT);
++ m_TemplateMT.SetValueMaybeNull(pMT);
+
+ // ParamTypeDescs start out life not fully loaded
+ m_typeAndFlags |= TypeDesc::enum_flag_IsNotFullyLoaded;
+@@ -323,8 +323,13 @@ public:
+ friend class ArrayOpLinker;
+ #endif
+ protected:
++ PTR_MethodTable GetTemplateMethodTableInternal() {
++ WRAPPER_NO_CONTRACT;
++ return ReadPointerMaybeNull(this, &ParamTypeDesc::m_TemplateMT);
++ }
++
+ // the m_typeAndFlags field in TypeDesc tell what kind of parameterized type we have
+- FixupPointer<PTR_MethodTable> m_TemplateMT; // The shared method table, some variants do not use this field (it is null)
++ RelativeFixupPointer<PTR_MethodTable> m_TemplateMT; // The shared method table, some variants do not use this field (it is null)
+ TypeHandle m_Arg; // The type that is being modified
+ LOADERHANDLE m_hExposedClassObject; // handle back to the internal reflection Type object
+ };
+@@ -380,8 +385,8 @@ public:
+ WRAPPER_NO_CONTRACT;
+
+ _ASSERTE(!m_TemplateMT.IsNull());
+- _ASSERTE(m_TemplateMT.GetValue()->IsArray());
+- _ASSERTE(m_TemplateMT.GetValue()->ParentEquals(g_pArrayClass));
++ _ASSERTE(GetTemplateMethodTableInternal()->IsArray());
++ _ASSERTE(GetTemplateMethodTableInternal()->ParentEquals(g_pArrayClass));
+
+ return g_pArrayClass;
+ }
+@@ -416,16 +421,16 @@ public:
+ void Fixup(DataImage *image);
+ #endif
+
+- MethodTable * GetTemplateMethodTable() {
++ PTR_MethodTable GetTemplateMethodTable() {
+ WRAPPER_NO_CONTRACT;
+- MethodTable * pTemplateMT = m_TemplateMT.GetValue();
+- _ASSERTE(pTemplateMT->IsArray());
+- return pTemplateMT;
++ PTR_MethodTable ptrTemplateMT = GetTemplateMethodTableInternal();
++ _ASSERTE(ptrTemplateMT->IsArray());
++ return ptrTemplateMT;
+ }
+
+ TADDR GetTemplateMethodTableMaybeTagged() {
+ WRAPPER_NO_CONTRACT;
+- return m_TemplateMT.GetValueMaybeTagged();
++ return m_TemplateMT.GetValueMaybeTagged(dac_cast<TADDR>(this) + offsetof(ArrayTypeDesc, m_TemplateMT));
+ }
+
+ #ifdef FEATURE_COMINTEROP
+diff --git a/src/vm/typedesc.inl b/src/vm/typedesc.inl
+index 4d7416e..312270e 100644
+--- a/src/vm/typedesc.inl
++++ b/src/vm/typedesc.inl
+@@ -31,7 +31,7 @@ inline PTR_MethodTable TypeDesc::GetMethodTable() {
+ if (GetInternalCorElementType() == ELEMENT_TYPE_VALUETYPE)
+ return dac_cast<PTR_MethodTable>(asParam->m_Arg.AsMethodTable());
+ else
+- return(asParam->m_TemplateMT.GetValue());
++ return(asParam->GetTemplateMethodTableInternal());
+ }
+
+ inline TypeHandle TypeDesc::GetTypeParam() {
+--
+2.7.4
+
diff --git a/packaging/0016-Fix-copying-of-FieldMarshaler-structures-in-EEClassL.patch b/packaging/0016-Fix-copying-of-FieldMarshaler-structures-in-EEClassL.patch
new file mode 100644
index 0000000000..4b0513106a
--- /dev/null
+++ b/packaging/0016-Fix-copying-of-FieldMarshaler-structures-in-EEClassL.patch
@@ -0,0 +1,42 @@
+From 1c2a01cd07c9893ea296794d4df542092759ed94 Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <r.ayrapetyan@samsung.com>
+Date: Fri, 7 Jul 2017 15:54:14 +0300
+Subject: [PATCH 16/32] Fix copying of FieldMarshaler structures in
+ EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing.
+
+Related issue: #12643
+---
+ src/vm/fieldmarshaler.cpp | 16 ++++++++++++----
+ 1 file changed, 12 insertions(+), 4 deletions(-)
+
+diff --git a/src/vm/fieldmarshaler.cpp b/src/vm/fieldmarshaler.cpp
+index 9415b94..0685093 100644
+--- a/src/vm/fieldmarshaler.cpp
++++ b/src/vm/fieldmarshaler.cpp
+@@ -1606,11 +1606,19 @@ VOID EEClassLayoutInfo::CollectLayoutFieldMetadataThrowing(
+ {
+ CONSISTENCY_CHECK(fParentHasLayout);
+ PREFAST_ASSUME(pParentLayoutInfo != NULL); // See if (fParentHasLayout) branch above
+-
++
+ UINT numChildCTMFields = pEEClassLayoutInfoOut->m_numCTMFields - pParentLayoutInfo->m_numCTMFields;
+- memcpyNoGCRefs( ((BYTE*)pEEClassLayoutInfoOut->GetFieldMarshalers()) + MAXFIELDMARSHALERSIZE*numChildCTMFields,
+- pParentLayoutInfo->GetFieldMarshalers(),
+- MAXFIELDMARSHALERSIZE * (pParentLayoutInfo->m_numCTMFields) );
++
++ BYTE *pParentCTMFieldSrcArray = (BYTE*)pParentLayoutInfo->GetFieldMarshalers();
++ BYTE *pParentCTMFieldDestArray = ((BYTE*)pEEClassLayoutInfoOut->GetFieldMarshalers()) + MAXFIELDMARSHALERSIZE*numChildCTMFields;
++
++ for (UINT parentCTMFieldIndex = 0; parentCTMFieldIndex < pParentLayoutInfo->m_numCTMFields; parentCTMFieldIndex++)
++ {
++ FieldMarshaler *pParentCTMFieldSrc = (FieldMarshaler *)(pParentCTMFieldSrcArray + MAXFIELDMARSHALERSIZE*parentCTMFieldIndex);
++ FieldMarshaler *pParentCTMFieldDest = (FieldMarshaler *)(pParentCTMFieldDestArray + MAXFIELDMARSHALERSIZE*parentCTMFieldIndex);
++
++ pParentCTMFieldSrc->CopyTo(pParentCTMFieldDest, MAXFIELDMARSHALERSIZE);
++ }
+ }
+
+ }
+--
+2.7.4
+
diff --git a/packaging/0017-Fix-alignment-of-reads-in-MD5Transform.-12800.patch b/packaging/0017-Fix-alignment-of-reads-in-MD5Transform.-12800.patch
new file mode 100644
index 0000000000..f2d736f1ca
--- /dev/null
+++ b/packaging/0017-Fix-alignment-of-reads-in-MD5Transform.-12800.patch
@@ -0,0 +1,51 @@
+From 9483a7c0c4a90bd8a6eaa4faecf3136623318cc2 Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Mon, 17 Jul 2017 17:10:42 +0300
+Subject: [PATCH 17/32] Fix alignment of reads in MD5Transform. (#12800)
+
+---
+ src/utilcode/md5.cpp | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/src/utilcode/md5.cpp b/src/utilcode/md5.cpp
+index 8d6f8a3..6901ddb 100644
+--- a/src/utilcode/md5.cpp
++++ b/src/utilcode/md5.cpp
+@@ -10,6 +10,7 @@
+ #include "stdafx.h"
+
+ #include <stdlib.h>
++#include "stdmacros.h"
+ #include "md5.h"
+ #include "contract.h"
+
+@@ -76,7 +77,16 @@ void MD5::HashMore(const void* pvInput, ULONG cbInput)
+ // Hash the data in 64-byte runs, starting just after what we've copied
+ while (cbInput >= 64)
+ {
+- MD5Transform(m_state, (ULONG*)pbInput);
++ if (IS_ALIGNED(pbInput, sizeof(ULONG)))
++ {
++ MD5Transform(m_state, (ULONG*)pbInput);
++ }
++ else
++ {
++ ULONG inputCopy[64 / sizeof(ULONG)];
++ memcpy(inputCopy, pbInput, sizeof(inputCopy));
++ MD5Transform(m_state, inputCopy);
++ }
+ pbInput += 64;
+ cbInput -= 64;
+ }
+@@ -213,6 +223,8 @@ void MD5::GetHashValue(MD5HASHDATA* phash)
+ STATIC_CONTRACT_NOTHROW;
+ STATIC_CONTRACT_GC_NOTRIGGER;
+
++ _ASSERTE(IS_ALIGNED(data, sizeof(ULONG)));
++
+ ULONG a=state[0];
+ ULONG b=state[1];
+ ULONG c=state[2];
+--
+2.7.4
+
diff --git a/packaging/0018-Simplify-SHM-allocator-12815.patch b/packaging/0018-Simplify-SHM-allocator-12815.patch
new file mode 100644
index 0000000000..81a2fd6495
--- /dev/null
+++ b/packaging/0018-Simplify-SHM-allocator-12815.patch
@@ -0,0 +1,2647 @@
+From dda1640a541e3c1307ca63d1b570a0282988707f Mon Sep 17 00:00:00 2001
+From: gbalykov <g.balykov@samsung.com>
+Date: Tue, 25 Jul 2017 01:37:38 +0300
+Subject: [PATCH 18/32] Simplify SHM-allocator (#12815)
+
+* Simplify SHM-allocator
+
+* Remove SHMNULL and NULLSharedID
+---
+ src/pal/src/file/shmfilelockmgr.cpp | 40 +-
+ src/pal/src/include/pal/shm.hpp | 45 +-
+ src/pal/src/include/pal/shmemory.h | 212 +----
+ src/pal/src/include/pal/synchcache.hpp | 16 +-
+ src/pal/src/include/pal/synchobjects.hpp | 5 -
+ src/pal/src/objmgr/shmobject.cpp | 70 +-
+ src/pal/src/objmgr/shmobject.hpp | 5 +-
+ src/pal/src/objmgr/shmobjectmanager.cpp | 14 +-
+ src/pal/src/shmemory/shmemory.cpp | 1342 +----------------------------
+ src/pal/src/synchmgr/synchcontrollers.cpp | 20 +-
+ src/pal/src/synchmgr/synchmanager.cpp | 33 +-
+ src/pal/src/synchmgr/synchmanager.hpp | 2 +-
+ 12 files changed, 124 insertions(+), 1680 deletions(-)
+
+diff --git a/src/pal/src/file/shmfilelockmgr.cpp b/src/pal/src/file/shmfilelockmgr.cpp
+index a586abd..6deed61 100644
+--- a/src/pal/src/file/shmfilelockmgr.cpp
++++ b/src/pal/src/file/shmfilelockmgr.cpp
+@@ -102,7 +102,7 @@ CSharedMemoryFileLockMgr::GetLockControllerForFile(
+ )
+ {
+ PAL_ERROR palError = NO_ERROR;
+- SHMPTR shmFileLocks = SHMNULL;
++ SHMPTR shmFileLocks = NULL;
+ SHMFILELOCKS* fileLocks = NULL;
+ CSharedMemoryFileLockController *pController = NULL;
+
+@@ -214,7 +214,7 @@ CSharedMemoryFileLockMgr::GetLockControllerForFile(
+ // don't attempt to free it below.
+ //
+
+- shmFileLocks = SHMNULL;
++ shmFileLocks = NULL;
+
+ /* set the share mode again, it's possible that the share mode is now more
+ restrictive than the previous mode set. */
+@@ -247,7 +247,7 @@ GetLockControllerForFileExit:
+ pController->ReleaseController();
+ }
+
+- if (SHMNULL != shmFileLocks)
++ if (NULL != shmFileLocks)
+ {
+ FILECleanUpLockedRgn(
+ shmFileLocks,
+@@ -269,13 +269,13 @@ CSharedMemoryFileLockMgr::GetFileShareModeForFile(
+ {
+ PAL_ERROR palError = NO_ERROR;
+ *pdwShareMode = SHARE_MODE_NOT_INITALIZED;
+- SHMPTR shmFileLocks = SHMNULL;
++ SHMPTR shmFileLocks = NULL;
+ SHMFILELOCKS* fileLocks = NULL;
+
+ SHMLock();
+
+ palError = FILEGetSHMFileLocks(szFileName, &shmFileLocks, TRUE);
+- if (NO_ERROR != palError || shmFileLocks == SHMNULL)
++ if (NO_ERROR != palError || shmFileLocks == NULL)
+ {
+ goto GetLockControllerForFileExit;
+ }
+@@ -291,7 +291,7 @@ CSharedMemoryFileLockMgr::GetFileShareModeForFile(
+
+ GetLockControllerForFileExit:
+
+- if (SHMNULL != shmFileLocks)
++ if (NULL != shmFileLocks)
+ {
+ FILECleanUpLockedRgn(
+ shmFileLocks,
+@@ -425,7 +425,7 @@ CSharedMemoryFileLockController::ReleaseFileLock(
+ void
+ CSharedMemoryFileLockController::ReleaseController()
+ {
+- if (SHMNULL != m_shmFileLocks)
++ if (NULL != m_shmFileLocks)
+ {
+ FILECleanUpLockedRgn(
+ m_shmFileLocks,
+@@ -667,7 +667,7 @@ FILEUnlockFileRegion(
+ {
+ prevLock->next = curLockRgn->next;
+ }
+- SHMfree(shmcurLockRgn);
++ free(shmcurLockRgn);
+ }
+ else
+ {
+@@ -735,7 +735,7 @@ FILEGetSHMFileLocks(
+ TRACE("Create a new entry in the file lock list in SHM\n");
+
+ /* Create a new entry in the file lock list in SHM */
+- if ((shmPtrRet = SHMalloc(sizeof(SHMFILELOCKS))) == 0)
++ if ((shmPtrRet = malloc(sizeof(SHMFILELOCKS))) == 0)
+ {
+ ERROR("Can't allocate SHMFILELOCKS structure\n");
+ palError = ERROR_NOT_ENOUGH_MEMORY;
+@@ -749,7 +749,7 @@ FILEGetSHMFileLocks(
+ goto CLEANUP1;
+ }
+
+- filelocksPtr->unix_filename = SHMStrDup(filename);
++ filelocksPtr->unix_filename = strdup(filename);
+ if (filelocksPtr->unix_filename == 0)
+ {
+ ERROR("Can't allocate shared memory for filename\n");
+@@ -781,9 +781,9 @@ FILEGetSHMFileLocks(
+ goto EXIT;
+
+ CLEANUP2:
+- SHMfree(filelocksPtr->unix_filename);
++ free(filelocksPtr->unix_filename);
+ CLEANUP1:
+- SHMfree(shmPtrRet);
++ free(shmPtrRet);
+ shmPtrRet = 0;
+ EXIT:
+ SHMRelease();
+@@ -808,7 +808,7 @@ FILEAddNewLockedRgn(
+ {
+ PAL_ERROR palError = NO_ERROR;
+ SHMFILELOCKRGNS *newLockRgn, *lockRgnPtr;
+- SHMPTR shmNewLockRgn = SHMNULL;
++ SHMPTR shmNewLockRgn = NULL;
+
+ if ((fileLocks == NULL) || (pvControllerInstance == NULL))
+ {
+@@ -822,7 +822,7 @@ FILEAddNewLockedRgn(
+ TRACE("Create a new entry for the new lock region (%I64u %I64u)\n",
+ lockRgnStart, nbBytesToLock);
+
+- if ((shmNewLockRgn = SHMalloc(sizeof(SHMFILELOCKRGNS))) == SHMNULL)
++ if ((shmNewLockRgn = malloc(sizeof(SHMFILELOCKRGNS))) == NULL)
+ {
+ ERROR("Can't allocate SHMFILELOCKRGNS structure\n");
+ palError = ERROR_NOT_ENOUGH_MEMORY;
+@@ -897,9 +897,9 @@ FILEAddNewLockedRgn(
+
+ EXIT:
+
+- if (NO_ERROR != palError && SHMNULL != shmNewLockRgn)
++ if (NO_ERROR != palError && NULL != shmNewLockRgn)
+ {
+- SHMfree(shmNewLockRgn);
++ free(shmNewLockRgn);
+ }
+
+ SHMRelease();
+@@ -950,7 +950,7 @@ FILECleanUpLockedRgn(
+ {
+ /* removing the first lock */
+ fileLocks->fileLockedRgns = curLockRgn->next;
+- SHMfree(shmcurLockRgn);
++ free(shmcurLockRgn);
+ shmcurLockRgn = fileLocks->fileLockedRgns;
+ if (SHMPTR_TO_TYPED_PTR_BOOL(SHMFILELOCKRGNS, curLockRgn, shmcurLockRgn) == FALSE)
+ {
+@@ -961,7 +961,7 @@ FILECleanUpLockedRgn(
+ else
+ {
+ prevLock->next = curLockRgn->next;
+- SHMfree(shmcurLockRgn);
++ free(shmcurLockRgn);
+ shmcurLockRgn = prevLock->next;
+ if (SHMPTR_TO_TYPED_PTR_BOOL(SHMFILELOCKRGNS, curLockRgn, shmcurLockRgn) == FALSE)
+ {
+@@ -1020,9 +1020,9 @@ FILECleanUpLockedRgn(
+ }
+
+ if (fileLocks->unix_filename)
+- SHMfree(fileLocks->unix_filename);
++ free(fileLocks->unix_filename);
+
+- SHMfree(shmFileLocks);
++ free(shmFileLocks);
+ }
+ }
+ EXIT:
+diff --git a/src/pal/src/include/pal/shm.hpp b/src/pal/src/include/pal/shm.hpp
+index de1d09e..36d6fd4 100644
+--- a/src/pal/src/include/pal/shm.hpp
++++ b/src/pal/src/include/pal/shm.hpp
+@@ -28,47 +28,7 @@ Abstract:
+ // isn't considered a pointer type...
+ //
+
+-#define SHMNULL 0
+-
+-#ifndef _DEBUG
+-
+-inline
+-void *
+-ShmPtrToPtrFast(SHMPTR shmptr)
+-{
+- void *pv = NULL;
+-
+- if (SHMNULL != shmptr)
+- {
+- int segment = shmptr >> 24;
+-
+- if (segment < shm_numsegments)
+- {
+- pv = reinterpret_cast<void*>(
+- reinterpret_cast<DWORD_PTR>(shm_segment_bases[(uint)segment].Load())
+- + (shmptr & 0x00FFFFFF)
+- );
+- }
+- else
+- {
+- pv = SHMPtrToPtr(shmptr);
+- }
+- }
+-
+- return pv;
+-}
+-
+-//
+-// We could use a function template here to avoid the cast / macro
+-//
+-
+-#define SHMPTR_TO_TYPED_PTR(type, shmptr) reinterpret_cast<type*>(ShmPtrToPtrFast((shmptr)))
+-
+-#else
+-
+-#define SHMPTR_TO_TYPED_PTR(type, shmptr) reinterpret_cast<type*>(SHMPtrToPtr((shmptr)))
+-
+-#endif
++#define SHMPTR_TO_TYPED_PTR(type, shmptr) reinterpret_cast<type*>(shmptr)
+
+ /* Set ptr to NULL if shmPtr == 0, else set ptr to SHMPTR_TO_TYPED_PTR(type, shmptr)
+ return FALSE if SHMPTR_TO_TYPED_PTR returns NULL ptr from non null shmptr,
+@@ -76,8 +36,5 @@ ShmPtrToPtrFast(SHMPTR shmptr)
+ #define SHMPTR_TO_TYPED_PTR_BOOL(type, ptr, shmptr) \
+ ((shmptr != 0) ? ((ptr = SHMPTR_TO_TYPED_PTR(type, shmptr)) != NULL) : ((ptr = NULL) == NULL))
+
+-
+-
+-
+ #endif // _SHM_HPP_
+
+diff --git a/src/pal/src/include/pal/shmemory.h b/src/pal/src/include/pal/shmemory.h
+index 5ca8481..bf12de1 100644
+--- a/src/pal/src/include/pal/shmemory.h
++++ b/src/pal/src/include/pal/shmemory.h
+@@ -15,56 +15,7 @@ Abstract:
+
+ How to use :
+
+-The SHMalloc function can be used to allocate memory in the shared memory area.
+-It returns a value of type SHMPTR, which will be useable in all participating
+-processes. The SHMPTR_TO_PTR macro can be used to convert a SHMPTR value into
+-an address valid *only* within the current process. Do NOT store pointers in
+-shared memory, since those will not be valid for other processes. If you need
+-to construct linked lists or other strctures that usually use pointers, use
+-SHMPTR values instead of pointers. In addition, Lock/Release functions must be
+-used when manipulating data in shared memory, to ensure inter-process synchronization.
+-
+-Example :
+-
+-//a simple linked list type
+-typedef struct
+-{
+-int count;
+-SHMPTR string;
+-SHMPTR next;
+-}SHMLIST;
+-
+-// Allocate a new list item
+-SHMPTR new_item = SHMalloc(sizeof(SHMLIST));
+-
+-// get a pointer to it
+-SHMLIST *item_ptr = (SHMLIST *)SHMPTR_TO_PTR(new_item);
+-
+-// Allocate memory for the "string" member, initialize it
+-item_ptr->string = SHMalloc(strlen("string"));
+-LPSTR str_ptr = (LPSTR)SHMPTR_TO_PTR(item_ptr->string);
+-strcpy(str_ptr, "string");
+-
+-//Take the shared memory lock to prevent anyone from modifying the linked list
+-SHMLock();
+-
+-//get the list's head from somewhere
+-SHMPTR list_head = get_list_head();
+-
+-//link the list to our new item
+-item_ptr->next = list_head
+-
+-//get a pointer to the list head's structure
+-SHMLIST *head_ptr = (SHMLIST *)SHMPTR_TO_PTR(list_head);
+-
+-//set the new item's count value based on the head's count value
+-item_ptr->count = head_ptr->count + 1;
+-
+-//save the new item as the new head of the list
+-set_list_head(new_item);
+-
+-//We're done modifying the list, release the lock
+-SHMRelease
++Lock/Release functions must be used when manipulating data in shared memory, to ensure inter-process synchronization.
+
+
+
+@@ -79,86 +30,17 @@ extern "C"
+ #endif // __cplusplus
+
+ /*
+-Type for shared memory blocks. use SHMPTR_TO_PTR to get a useable address.
++Type for shared memory blocks
+ */
+-typedef DWORD_PTR SHMPTR;
+-
+-#define MAX_SEGMENTS 256
+-
++typedef LPVOID SHMPTR;
+
+ typedef enum {
+- SIID_PROCESS_INFO,/* pointers to PROCESS structures? */
+ SIID_NAMED_OBJECTS,
+ SIID_FILE_LOCKS,
+
+ SIID_LAST
+ } SHM_INFO_ID;
+
+-typedef enum
+-{
+- SHM_NAMED_MAPPINGS, /* structs with map name, file name & flags? */
+- SHM_NAMED_EVENTS, /* structs with event names & ThreadWaitingList struct? */
+- SHM_NAMED_MUTEXS, /* structs with mutext names, and ThreadWaitingList struct */
+-
+- SHM_NAMED_LAST
+-} SHM_NAMED_OBJECTS_ID;
+-
+-typedef struct _SMNO
+-{
+- SHM_NAMED_OBJECTS_ID ObjectType;
+- SHMPTR ShmNext;
+- SHMPTR ShmObjectName;
+- SHMPTR ShmSelf;
+-
+-}SHM_NAMED_OBJECTS, * PSHM_NAMED_OBJECTS;
+-
+-
+-/*
+-SHMPTR_TO_PTR
+-
+-Macro to convert a SHMPTR value into a valid (for this process) pointer.
+-
+-In debug builds, we always call the function to do full checks.
+-In release builds, check if the segment is known, and if it is, do only minimal
+-validation (if segment is unknown, we have to call the function)
+- */
+-#if _DEBUG
+-
+-#define SHMPTR_TO_PTR(shmptr) \
+- SHMPtrToPtr(shmptr)
+-
+-#else /* !_DEBUG */
+-
+-extern int shm_numsegments;
+-
+-/* array containing the base address of each segment */
+-extern Volatile<LPVOID> shm_segment_bases[MAX_SEGMENTS];
+-
+-#define SHMPTR_TO_PTR(shmptr)\
+- ((shmptr)?(((static_cast<int>(shmptr)>>24)<shm_numsegments)?\
+- reinterpret_cast<LPVOID>(reinterpret_cast<size_t>(shm_segment_bases[static_cast<int>(shmptr)>>24].Load())+(static_cast<int>(shmptr)&0x00FFFFFF)):\
+- SHMPtrToPtr(shmptr)): static_cast<LPVOID>(NULL))
+-
+-
+-#endif /* _DEBUG */
+-
+-/* Set ptr to NULL if shmPtr == 0, else set ptr to SHMPTR_TO_PTR(shmptr)
+- return FALSE if SHMPTR_TO_PTR returns NULL ptr from non null shmptr,
+- TRUE otherwise */
+-#define SHMPTR_TO_PTR_BOOL(ptr, shmptr) \
+- ((shmptr != 0) ? ((ptr = SHMPTR_TO_PTR(shmptr)) != NULL) : ((ptr = NULL) == NULL))
+-
+-/*++
+-SHMPtrToPtr
+-
+-Convert a SHMPTR value into a useable pointer.
+-
+-Unlike the macro defined above, this function performs as much validation as
+-possible, and can handle cases when the SHMPTR is located in an aread of shared
+-memory the process doesn't yet know about.
+---*/
+-LPVOID SHMPtrToPtr(SHMPTR shmptr);
+-
+ /*++
+ SHMInitialize
+
+@@ -176,37 +58,6 @@ registered processes, and remove all shared memory files if no process remains
+ void SHMCleanup(void);
+
+ /*++
+-SHMalloc
+-
+-Allocate a block of memory of the specified size
+-
+-Parameters :
+- size_t size : size of block required
+-
+-Return value :
+- A SHMPTR identifying the new block, or 0 on failure. Use SHMPtrToPtr to
+- convert a SHMPTR into a useable pointer (but remember to lock the shared
+- memory first!)
+-
+-Notes :
+- SHMalloc will fail if the requested size is larger than a certain maximum.
+- At the moment, the maximum is 520 bytes (MAX_PATH_FNAME*2).
+---*/
+-SHMPTR SHMalloc(size_t size);
+-
+-/*++
+-SHMfree
+-
+-Release a block of shared memory and put it back in the shared memory pool
+-
+-Parameters :
+- SHMPTR shmptr : identifier of block to release
+-
+-(no return value)
+---*/
+-void SHMfree(SHMPTR shmptr);
+-
+-/*++
+ SHMLock
+
+ Restrict shared memory access to the current thread of the current process
+@@ -266,63 +117,6 @@ Notes :
+ --*/
+ BOOL SHMSetInfo(SHM_INFO_ID element, SHMPTR value);
+
+-
+-/********************** Shared memory help functions ********************/
+-
+-/*++
+-SHMStrDup
+-
+-Duplicates the string in shared memory.
+-
+-Returns the new address as SHMPTR on success.
+-Returns (SHMPTR)NULL on failure.
+---*/
+-SHMPTR SHMStrDup( LPCSTR string );
+-
+-/*++
+-SHMWStrDup
+-
+-Duplicates the wide string in shared memory.
+-
+-Returns the new address as SHMPTR on success.
+-Returns (SHMPTR)NULL on failure.
+---*/
+-SHMPTR SHMWStrDup( LPCWSTR string );
+-
+-
+-/*++
+-SHMFindNamedObjectByName
+-
+-Searches for an object whose name matches the name and ID passed in.
+-
+-Returns a SHMPTR to its location in shared memory. If no object
+-matches the name, the function returns NULL and sets pbNameExists to FALSE.
+-If an object matches the name but is of a different type, the function
+-returns NULL and sets pbNameExists to TRUE.
+-
+---*/
+-SHMPTR SHMFindNamedObjectByName( LPCWSTR lpName, SHM_NAMED_OBJECTS_ID oid,
+- BOOL *pbNameExists );
+-
+-/*++
+-SHMRemoveNamedObject
+-
+-Removes the specified named object from the list
+-
+-No return.
+-
+-note : the caller is reponsible for releasing all associated memory
+---*/
+-void SHMRemoveNamedObject( SHMPTR shmNamedObject );
+-
+-/*++ SHMAddNamedObject
+-
+-Adds the specified named object to the list.
+-
+-No return.
+---*/
+-void SHMAddNamedObject( SHMPTR shmNewNamedObject );
+-
+ #ifdef __cplusplus
+ }
+ #endif // __cplusplus
+diff --git a/src/pal/src/include/pal/synchcache.hpp b/src/pal/src/include/pal/synchcache.hpp
+index c172842..0abc7dd 100644
+--- a/src/pal/src/include/pal/synchcache.hpp
++++ b/src/pal/src/include/pal/synchcache.hpp
+@@ -252,7 +252,7 @@ namespace CorUnix
+
+ SharedID Get(CPalThread * pthrCurrent)
+ {
+- SharedID shridObj = NULLSharedID;
++ SharedID shridObj = NULL;
+
+ Get(pthrCurrent, 1, &shridObj);
+ return shridObj;
+@@ -291,8 +291,8 @@ namespace CorUnix
+ {
+ for (k=0; k<m_iMaxDepth/PreAllocFactor-n+i; k++)
+ {
+- shridObj = RawSharedObjectAlloc(sizeof(USHRSynchCacheStackNode), DefaultSharedPool);
+- if (NULLSharedID == shridObj)
++ shridObj = malloc(sizeof(USHRSynchCacheStackNode));
++ if (NULL == shridObj)
+ {
+ Flush(pthrCurrent, true);
+ break;
+@@ -312,8 +312,8 @@ namespace CorUnix
+
+ for (j=i;j<n;j++)
+ {
+- shridObj = RawSharedObjectAlloc(sizeof(USHRSynchCacheStackNode), DefaultSharedPool);
+- if (NULLSharedID == shridObj)
++ shridObj = malloc(sizeof(USHRSynchCacheStackNode));
++ if (NULL == shridObj)
+ break;
+ #ifdef _DEBUG
+ pvObjRaw = SharedIDToPointer(shridObj);
+@@ -333,7 +333,7 @@ namespace CorUnix
+
+ void Add(CPalThread * pthrCurrent, SharedID shridObj)
+ {
+- if (NULLSharedID == shridObj)
++ if (NULL == shridObj)
+ {
+ return;
+ }
+@@ -360,7 +360,7 @@ namespace CorUnix
+ }
+ else
+ {
+- RawSharedObjectFree(shridObj);
++ free(shridObj);
+ }
+ Unlock(pthrCurrent);
+ }
+@@ -387,7 +387,7 @@ namespace CorUnix
+ pTemp = pNode;
+ pNode = pNode->pointers.pNext;
+ shridTemp = pTemp->pointers.shrid;
+- RawSharedObjectFree(shridTemp);
++ free(shridTemp);
+ }
+ }
+ };
+diff --git a/src/pal/src/include/pal/synchobjects.hpp b/src/pal/src/include/pal/synchobjects.hpp
+index aa3a8f1..62f4017 100644
+--- a/src/pal/src/include/pal/synchobjects.hpp
++++ b/src/pal/src/include/pal/synchobjects.hpp
+@@ -29,13 +29,8 @@ Abstract:
+ #include <pthread.h>
+
+ #define SharedID SHMPTR
+-#define SharedPoolId ULONG_PTR
+-#define DefaultSharedPool ((ULONG_PTR)0)
+-#define NULLSharedID ((SHMPTR)NULL)
+ #define SharedIDToPointer(shID) SHMPTR_TO_TYPED_PTR(PVOID, shID)
+ #define SharedIDToTypePointer(TYPE,shID) SHMPTR_TO_TYPED_PTR(TYPE, shID)
+-#define RawSharedObjectAlloc(szSize, shPoolId) SHMalloc(szSize)
+-#define RawSharedObjectFree(shID) SHMfree(shID)
+
+ namespace CorUnix
+ {
+diff --git a/src/pal/src/objmgr/shmobject.cpp b/src/pal/src/objmgr/shmobject.cpp
+index 1435d5d..2692554 100644
+--- a/src/pal/src/objmgr/shmobject.cpp
++++ b/src/pal/src/objmgr/shmobject.cpp
+@@ -180,7 +180,7 @@ CSharedMemoryObject::InitializeFromExistingSharedData(
+
+ m_ObjectDomain = SharedObject;
+
+- _ASSERTE(SHMNULL != m_shmod);
++ _ASSERTE(NULL != m_shmod);
+
+ psmod = SHMPTR_TO_TYPED_PTR(SHMObjData, m_shmod);
+ if (NULL == psmod)
+@@ -236,7 +236,7 @@ CSharedMemoryObject::InitializeFromExistingSharedData(
+ goto InitializeFromExistingSharedDataExit;
+ }
+
+- if (SHMNULL != psmod->shmObjImmutableData)
++ if (NULL != psmod->shmObjImmutableData)
+ {
+ VOID *pv = SHMPTR_TO_TYPED_PTR(VOID, psmod->shmObjImmutableData);
+ if (NULL != pv)
+@@ -251,7 +251,7 @@ CSharedMemoryObject::InitializeFromExistingSharedData(
+ }
+ }
+
+- if (SHMNULL != psmod->shmObjSharedData)
++ if (NULL != psmod->shmObjSharedData)
+ {
+ m_pvSharedData = SHMPTR_TO_TYPED_PTR(VOID, psmod->shmObjSharedData);
+ if (NULL == m_pvSharedData)
+@@ -301,7 +301,7 @@ CSharedMemoryObject::AllocateSharedDataItems(
+ )
+ {
+ PAL_ERROR palError = NO_ERROR;
+- SHMPTR shmod = SHMNULL;
++ SHMPTR shmod = NULL;
+ SHMObjData *psmod = NULL;
+
+ _ASSERTE(NULL != pshmObjData);
+@@ -321,8 +321,8 @@ CSharedMemoryObject::AllocateSharedDataItems(
+
+ SHMLock();
+
+- shmod = SHMalloc(sizeof(SHMObjData));
+- if (SHMNULL == shmod)
++ shmod = malloc(sizeof(SHMObjData));
++ if (NULL == shmod)
+ {
+ ERROR("Unable to allocate m_shmod for new object\n");
+ palError = ERROR_OUTOFMEMORY;
+@@ -339,9 +339,19 @@ CSharedMemoryObject::AllocateSharedDataItems(
+
+ if (0 != m_oa.sObjectName.GetStringLength())
+ {
++ LPCWSTR str = m_oa.sObjectName.GetString();
++ _ASSERTE(str);
++
+ psmod->dwNameLength = m_oa.sObjectName.GetStringLength();
+- psmod->shmObjName = SHMWStrDup(m_oa.sObjectName.GetString());
+- if (SHMNULL == psmod->shmObjName)
++
++ UINT length = (PAL_wcslen(str) + 1) * sizeof(WCHAR);
++ psmod->shmObjName = malloc(length);
++
++ if (psmod->shmObjName != 0)
++ {
++ memcpy(psmod->shmObjName, str, length);
++ }
++ else
+ {
+ ERROR("Unable to allocate psmod->shmObjName for new object\n");
+ palError = ERROR_OUTOFMEMORY;
+@@ -356,8 +366,8 @@ CSharedMemoryObject::AllocateSharedDataItems(
+ // by CSharedMemoryObjectManager::RegisterObject or PromoteSharedData
+ //
+
+- psmod->shmObjImmutableData = SHMalloc(m_pot->GetImmutableDataSize());
+- if (SHMNULL == psmod->shmObjImmutableData)
++ psmod->shmObjImmutableData = malloc(m_pot->GetImmutableDataSize());
++ if (NULL == psmod->shmObjImmutableData)
+ {
+ ERROR("Unable to allocate psmod->shmObjImmutableData for new object\n");
+ palError = ERROR_OUTOFMEMORY;
+@@ -367,8 +377,8 @@ CSharedMemoryObject::AllocateSharedDataItems(
+
+ if (0 != m_pot->GetSharedDataSize())
+ {
+- psmod->shmObjSharedData = SHMalloc(m_pot->GetSharedDataSize());
+- if (SHMNULL == psmod->shmObjSharedData)
++ psmod->shmObjSharedData = malloc(m_pot->GetSharedDataSize());
++ if (NULL == psmod->shmObjSharedData)
+ {
+ ERROR("Unable to allocate psmod->shmObjSharedData for new object\n");
+ palError = ERROR_OUTOFMEMORY;
+@@ -381,7 +391,7 @@ CSharedMemoryObject::AllocateSharedDataItems(
+
+ AllocateSharedDataItemsExit:
+
+- if (NO_ERROR != palError && SHMNULL != shmod)
++ if (NO_ERROR != palError && NULL != shmod)
+ {
+ FreeSharedDataAreas(shmod);
+ }
+@@ -412,7 +422,7 @@ CSharedMemoryObject::FreeSharedDataAreas(
+ {
+ SHMObjData *psmod;
+
+- _ASSERTE(SHMNULL != shmObjData);
++ _ASSERTE(NULL != shmObjData);
+
+ ENTRY("CSharedMemoryObject::FreeSharedDataAreas"
+ "(shmObjData = %p)\n",
+@@ -424,22 +434,22 @@ CSharedMemoryObject::FreeSharedDataAreas(
+ psmod = SHMPTR_TO_TYPED_PTR(SHMObjData, shmObjData);
+ _ASSERTE(NULL != psmod);
+
+- if (SHMNULL != psmod->shmObjImmutableData)
++ if (NULL != psmod->shmObjImmutableData)
+ {
+- SHMfree(psmod->shmObjImmutableData);
++ free(psmod->shmObjImmutableData);
+ }
+
+- if (SHMNULL != psmod->shmObjSharedData)
++ if (NULL != psmod->shmObjSharedData)
+ {
+- SHMfree(psmod->shmObjSharedData);
++ free(psmod->shmObjSharedData);
+ }
+
+- if (SHMNULL != psmod->shmObjName)
++ if (NULL != psmod->shmObjName)
+ {
+- SHMfree(psmod->shmObjName);
++ free(psmod->shmObjName);
+ }
+
+- SHMfree(shmObjData);
++ free(shmObjData);
+
+ SHMRelease();
+
+@@ -463,7 +473,7 @@ CSharedMemoryObject::PromoteSharedData(
+ SHMObjData *psmod
+ )
+ {
+- _ASSERTE(SHMNULL != shmObjData);
++ _ASSERTE(NULL != shmObjData);
+ _ASSERTE(NULL != psmod);
+
+ ENTRY("CSharedMemoryObject::PromoteSharedData"
+@@ -760,7 +770,7 @@ CSharedMemoryObject::DereferenceSharedData()
+
+ if (!fSharedDataAlreadDereferenced)
+ {
+- if (SHMNULL != m_shmod)
++ if (NULL != m_shmod)
+ {
+ SHMObjData *psmod;
+
+@@ -789,7 +799,7 @@ CSharedMemoryObject::DereferenceSharedData()
+
+ _ASSERTE(0 != psmod->dwNameLength);
+
+- if (SHMNULL != psmod->shmPrevObj)
++ if (NULL != psmod->shmPrevObj)
+ {
+ SHMObjData *psmodPrevious = SHMPTR_TO_TYPED_PTR(SHMObjData, psmod->shmPrevObj);
+ _ASSERTE(NULL != psmodPrevious);
+@@ -809,7 +819,7 @@ CSharedMemoryObject::DereferenceSharedData()
+ }
+ }
+
+- if (SHMNULL != psmod->shmNextObj)
++ if (NULL != psmod->shmNextObj)
+ {
+ SHMObjData *psmodNext = SHMPTR_TO_TYPED_PTR(SHMObjData, psmod->shmNextObj);
+ _ASSERTE(NULL != psmodNext);
+@@ -820,8 +830,8 @@ CSharedMemoryObject::DereferenceSharedData()
+ #if _DEBUG
+ else
+ {
+- _ASSERTE(SHMNULL == psmod->shmPrevObj);
+- _ASSERTE(SHMNULL == psmod->shmNextObj);
++ _ASSERTE(NULL == psmod->shmPrevObj);
++ _ASSERTE(NULL == psmod->shmNextObj);
+ }
+ #endif
+ }
+@@ -871,7 +881,7 @@ CSharedMemoryObject::~CSharedMemoryObject()
+ {
+ free(m_pvSharedData);
+ }
+- else if (SHMNULL != m_shmod && m_fDeleteSharedData)
++ else if (NULL != m_shmod && m_fDeleteSharedData)
+ {
+ FreeSharedDataAreas(m_shmod);
+ }
+@@ -1195,7 +1205,7 @@ CSharedMemoryWaitableObject::EnsureObjectIsShared(
+ {
+ PAL_ERROR palError = NO_ERROR;
+ IDataLock *pDataLock = NULL;
+- SHMPTR shmObjData = SHMNULL;
++ SHMPTR shmObjData = NULL;
+ SHMObjData *psmod;
+ VOID *pvSharedSynchData;
+
+@@ -1278,7 +1288,7 @@ EnsureObjectIsSharedExitNoSHMLockRelease:
+
+ g_pSynchronizationManager->ReleaseProcessLock(pthr);
+
+- if (NO_ERROR != palError && SHMNULL != shmObjData)
++ if (NO_ERROR != palError && NULL != shmObjData)
+ {
+ //
+ // Since shmObjdData is local to this function there's no
+diff --git a/src/pal/src/objmgr/shmobject.hpp b/src/pal/src/objmgr/shmobject.hpp
+index addfda5..66b9ea9 100644
+--- a/src/pal/src/objmgr/shmobject.hpp
++++ b/src/pal/src/objmgr/shmobject.hpp
+@@ -121,8 +121,7 @@ namespace CorUnix
+ // m_fSharedDataDereferenced will be TRUE if DereferenceSharedData
+ // has already been called. (N.B. -- this is a LONG instead of a bool
+ // because it is passed to InterlockedExchange). If the shared data blob
+- // should be freed in the object's destructor (i.e., SHMfree should be
+- // called on the appropriate SHMPTRs) DereferenceSharedData will
++ // should be freed in the object's destructor DereferenceSharedData will
+ // set m_fDeleteSharedData to TRUE.
+ //
+
+@@ -178,7 +177,7 @@ namespace CorUnix
+ :
+ CPalObjectBase(pot),
+ m_pcsObjListLock(pcsObjListLock),
+- m_shmod(SHMNULL),
++ m_shmod(NULL),
+ m_pvSharedData(NULL),
+ m_ObjectDomain(ProcessLocalObject),
+ m_fSharedDataDereferenced(FALSE),
+diff --git a/src/pal/src/objmgr/shmobjectmanager.cpp b/src/pal/src/objmgr/shmobjectmanager.cpp
+index 4275421..755fa46 100644
+--- a/src/pal/src/objmgr/shmobjectmanager.cpp
++++ b/src/pal/src/objmgr/shmobjectmanager.cpp
+@@ -277,7 +277,7 @@ CSharedMemoryObjectManager::RegisterObject(
+
+ if (0 != poa->sObjectName.GetStringLength())
+ {
+- SHMPTR shmObjectListHead = SHMNULL;
++ SHMPTR shmObjectListHead = NULL;
+
+ //
+ // The object must be shared
+@@ -352,7 +352,7 @@ CSharedMemoryObjectManager::RegisterObject(
+ }
+
+ shmObjectListHead = SHMGetInfo(SIID_NAMED_OBJECTS);
+- if (SHMNULL != shmObjectListHead)
++ if (NULL != shmObjectListHead)
+ {
+ SHMObjData *psmodListHead;
+
+@@ -505,8 +505,8 @@ CSharedMemoryObjectManager::LocateObject(
+ {
+ PAL_ERROR palError = NO_ERROR;
+ IPalObject *pobjExisting = NULL;
+- SHMPTR shmSharedObjectData = SHMNULL;
+- SHMPTR shmObjectListEntry = SHMNULL;
++ SHMPTR shmSharedObjectData = NULL;
++ SHMPTR shmObjectListEntry = NULL;
+ SHMObjData *psmod = NULL;
+ LPWSTR pwsz = NULL;
+
+@@ -598,7 +598,7 @@ CSharedMemoryObjectManager::LocateObject(
+ SHMLock();
+
+ shmObjectListEntry = SHMGetInfo(SIID_NAMED_OBJECTS);
+- while (SHMNULL != shmObjectListEntry)
++ while (NULL != shmObjectListEntry)
+ {
+ psmod = SHMPTR_TO_TYPED_PTR(SHMObjData, shmObjectListEntry);
+ if (NULL != psmod)
+@@ -634,7 +634,7 @@ CSharedMemoryObjectManager::LocateObject(
+ }
+ }
+
+- if (SHMNULL != shmSharedObjectData)
++ if (NULL != shmSharedObjectData)
+ {
+ CSharedMemoryObject *pshmobj = NULL;
+ CObjectAttributes oa(pwsz, NULL);
+@@ -1094,7 +1094,7 @@ CSharedMemoryObjectManager::ImportSharedObjectIntoProcess(
+ _ASSERTE(NULL != pthr);
+ _ASSERTE(NULL != pot);
+ _ASSERTE(NULL != poa);
+- _ASSERTE(SHMNULL != shmSharedObjectData);
++ _ASSERTE(NULL != shmSharedObjectData);
+ _ASSERTE(NULL != psmod);
+ _ASSERTE(NULL != ppshmobj);
+
+diff --git a/src/pal/src/shmemory/shmemory.cpp b/src/pal/src/shmemory/shmemory.cpp
+index 35dadd6..a12bd29 100644
+--- a/src/pal/src/shmemory/shmemory.cpp
++++ b/src/pal/src/shmemory/shmemory.cpp
+@@ -14,165 +14,14 @@ Abstract:
+
+ Implementation of shared memory infrastructure for IPC
+
+-Issues :
+-
+- Interprocess synchronization
+-
+-
+-There doesn't seem to be ANY synchronization mechanism that will work
+-inter-process AND be pthread-safe. FreeBSD's pthread implementation has no
+-support for inter-process synchronization (PTHREAD_PROCESS_SHARED);
+-"traditionnal" inter-process syncronization functions, on the other hand, are
+-not pthread-aware, and thus will block entire processes instead of only the
+-calling thread.
+-
+-From suggestions and information obtained on the freebsd-hackers mailing list,
+-I have come up with 2 possible strategies to ensure serialized access to our
+-shared memory region
+-
+-Note that the estimates of relative efficiency are wild guesses; my assumptions
+-are that blocking entire processes is least efficient, busy wait somewhat
+-better, and anything that does neither is preferable. However, the overhead of
+-complex solutions is likely to have an important impact on performance
+-
+-Option 1 : very simple; possibly less efficient. in 2 words : "busy wait"
+-Basically,
+-
+-while(InterlockedCompareExchange(spinlock_in_shared_memory, 1, 0)
+- sched_yield();
+-
+-In other words, if a value is 0, set it to 1; otherwise, try again until we
+-succeed. use shed_yield to give the system a chance to schedule other threads
+-while we wait. (once a thread succeeds at this, it does its work, then sets
+-the value back to 0)
+-One inconvenient : threads will not unblock in the order they are blocked;
+-once a thread releases the mutex, whichever waiting thread is scheduled next
+-will be unblocked. This is what is called the "thundering herd" problem, and in
+-extreme cases, can lead to starvation
+-Update : we'll set the spinlock to our PID instead of 1, that way we can find
+-out if the lock is held by a dead process.
+-
+-Option 2 : possibly more efficient, much more complex, borders on
+-"over-engineered". I'll explain it in stages, in the same way I deduced it.
+-
+-Option 2.1 : probably less efficient, reasonably simple. stop at step 2)
+-
+-1) The minimal, original idea was to use SysV semaphores for synchronization.
+-This didn't work, because semaphores block the entire process, which can easily
+-lead to deadlocks (thread 1 takes sem, thread 2 tries to take sem, blocks
+-process, thread 1 is blocked and never releases sem)
+-
+-2) (this is option 2.1) Protect the use of the semaphores in critical sections.
+-Enter the critical section before taking the semaphore, leave the section after
+-releasing the semaphore. This ensures that 2 threads of the same process will
+-never try to acquire the semaphore at the same time, which avoids deadlocks.
+-However, the entire process still blocks if another process has the semaphore.
+-Here, unblocking order should match blocking order (assuming the semaphores work
+-properly); therefore, no risk of starvation.
+-
+-3) This is where it gets complicated. To avoid blocking whole processes, we
+-can't use semaphores. One suggestion I got was to use multi-ended FIFOs, here's
+-how it would work.
+-
+--as in option 1, use InterlockedCompareExchange on a value in shared memory.
+--if this was not succesful (someone else has locked the shared memory), then :
+- -open a special FIFO for reading; try to read 1 byte. This will block until
+- someone writes to it, and *should* only block the current thread. (note :
+- more than one thread/process can open the same FIFO and block on read(),
+- in this case, only one gets woken up when someone writes to it.
+- *which* one is, again, not predictable; this may lead to starvation)
+- -once we are unblocked, we have the lock.
+--once we have the lock (either from Interlocked...() or from read()),
+- we can do our work
+--once the work is done, we open the FIFO for writing. this will fail if no one
+- is listening.
+--if no one is listening, release the lock by setting the shared memory value
+- back to 0
+--if someone is listening, write 1 byte to the FIFO to wake someone, then close
+- the FIFO. the value in shared memory will remain nonzero until a thread tries
+- to wake the next one and sees no one is listening.
+-
+-problem with this option : it is possible for a thread to call Interlocked...()
+-BETWEEN the failed "open for write" attempt and the subsequent restoration of
+-the SHM value back to zero. In this case, that thread will go to sleep and will
+-not wake up until *another* thread asks for the lock, takes it and releases it.
+-
+-so to fix that, we come to step
+-
+-4) Instead of using InterlockedCompareExchange, use a SysV semaphore :
+--when taking the lock :
+- -take the semaphore
+- -try to take the lock (check if value is zero, change it to 1 if it is)
+- -if we fail : open FIFO for reading, release the semaphore, read() and block
+- -if we succeed : release the semaphore
+--when releasing the lock :
+- -take the semaphore
+- -open FIFO for write
+- -if we succeed, release semaphore, then write value
+- -if we fail, reset SHM value to 0, then release semaphore.
+-
+-Yes, using a SysV semaphore will block the whole process, but for a very short
+-time (unlike option 2.1)
+-problem with this : again, we get deadlocks if 2 threads from a single process
+-try to take the semaphore. So like in option 2.1, we ave to wrap the semaphore
+-usage in a critical section. (complex enough yet?)
+-
+-so the locking sequence becomes EnterCriticalSection - take semaphore - try to
+- lock - open FIFO - release semaphore - LeaveCriticalSection - read
+-and the unlocking sequence becomes EnterCS - take sem - open FIFO - release
+- sem - LeaveCS - write
+-
+-Once again, the unblocking order probably won't match the blocking order.
+-This could be fixed by using multiple FIFOs : waiting thread open their own
+-personal FIFO, write the ID of their FIFO to another FIFO. The thread that wants
+-to release the lock reads ID from that FIFO, determines which FIFO to open for
+-writing and writes a byte to it. This way, whoever wrote its ID to the FIFO
+-first will be first to awake. How's that for complexity?
+-
+-So to summarize, the options are
+-1 - busy wait
+-2.1 - semaphores + critical sections (whole process blocks)
+-2 - semaphores + critical sections + FIFOs (minimal process blocking)
+-2.2 - option 2 with multiple FIFOs (minimal process blocking, order preserved)
+-
+-Considering the overhead involved in options 2 & 2.2, it is our guess that
+-option 1 may in fact be more efficient, and this is how we'll implement it for
+-the moment. Note that other platforms may not present the same difficulties
+-(i.e. other pthread implementations may support inter-process mutexes), and may
+-be able to use a simpler, more efficient approach.
+-
+-B] Reliability.
+-It is important for the shared memory implementation to be as foolproof as
+-possible. Since more than one process will be able to modify the shared data,
+-it becomes possible for one unstable process to destabilize the others. The
+-simplest example is a process that dies while modifying shared memory : if
+-it doesn't release its lock, we're in trouble. (this case will be taken care
+-of by using PIDs in the spinlock; this we we can check if the locking process
+-is still alive).
+-
+
+
+ --*/
+
+-#include "config.h"
+-#include "pal/palinternal.h"
+ #include "pal/dbgmsg.h"
+ #include "pal/shmemory.h"
+ #include "pal/critsect.h"
+-#include "pal/shmemory.h"
+-#include "pal/init.h"
+ #include "pal/process.h"
+-#include "pal/misc.h"
+-
+-#include <sys/types.h>
+-#include <sys/stat.h>
+-#include <sys/mman.h>
+-#include <unistd.h>
+-#include <signal.h>
+-#include <errno.h>
+-#include <string.h>
+-#include <sched.h>
+-#include <pthread.h>
+
+ #if HAVE_YIELD_SYSCALL
+ #include <sys/syscall.h>
+@@ -180,155 +29,25 @@ is still alive).
+
+ SET_DEFAULT_DEBUG_CHANNEL(SHMEM);
+
+-/* Macro-definitions **********************************************************/
+-
+-/* rounds 'val' up to be divisible by 'r'. 'r' must be a power of two. */
+-#ifndef roundup
+-#define roundup(val, r) ( ((val)+(r)-1) & ~( (r)-1 ) )
+-#endif
+-
+-#define SEGMENT_NAME_SUFFIX_LENGTH 10
+-
+-/*
+-SHMPTR structure :
+-High byte is SHM segment number
+-Low bytes are offset in the segment
+- */
+-#define SHMPTR_SEGMENT(shmptr) \
+- (((shmptr)>>24)&0xFF)
+-
+-#define SHMPTR_OFFSET(shmptr) \
+- ((shmptr)&0x00FFFFFF)
+-
+-#define MAKE_SHMPTR(segment,offset) \
+- ((SHMPTR)((((segment)&0xFF)<<24)|((offset)&0x00FFFFFF)))
+-
+-/*#define MAX_SEGMENTS 256*//*definition is now in shmemory.h*/
+-
+-/* Use MAP_NOSYNC to improve performance if it's available */
+-#if defined(MAP_NOSYNC)
+-#define MAPFLAGS MAP_NOSYNC|MAP_SHARED
+-#else
+-#define MAPFLAGS MAP_SHARED
+-#endif
+-
+-
+ /* Type definitions ***********************************************************/
+
+-enum SHM_POOL_SIZES
+-{
+- SPS_16 = 0, /* 16 bytes */
+- SPS_32, /* 32 bytes */
+- SPS_64, /* 64 bytes */
+- SPS_MAXPATHx2, /* 520 bytes, for long Unicode paths */
+-
+- SPS_LAST
+-};
+-/* Block size associated to each SPS identifier */
+-static const int block_sizes[SPS_LAST] = {16,32,64,roundup((MAX_LONGPATH+1)*2, sizeof(INT64))};
+-
+-/*
+-SHM_POOL_INFO
+-Description of a shared memory pool for a specific block size.
+-
+-Note on pool structure :
+-first_free identifies the first available SHMPTR in the block. Free blocks are
+-arranged in a linked list, each free block indicating the location of the next
+-one. To walk the list, do something like this :
+-SHMPTR *shmptr_ptr=(SHMPTR *)SHMPTR_TO_PTR(pool->first_free)
+-while(shm_ptr)
+-{
+- SHMPTR next = *shmptr_ptr;
+- shmptr_ptr = (SHMPTR *)SHMPTR_TO_PTR(next)
+-}
+- */
+-typedef struct
+-{
+- int item_size; /* size of 1 block, in bytes */
+- int num_items; /* total number of blocks in the pool */
+- int free_items; /* number of unused items in the pool */
+- SHMPTR first_free; /* location of first available block in the pool */
+-}SHM_POOL_INFO;
+-
+ /*
+-SHM_SEGMENT_HEADER
+-Description of a single shared memory segment
+-
+-Notes on segment names :
+-next_semgent contains the string generated by mkstemp() when a new segment is
+-generated. This allows processes to map segment files created by other
+-processes. To get the file name of a segment file, concatenate
+-"segment_name_prefix" and "next_segment".
+-
+-Notes on pool segments :
+-Each segment is divided into one pool for each defined block size (SPS_*).
+-These pools are linked with pools in other segment to form one large pool for
+-each block size, so that SHMAlloc() doesn't have to search each segment to find
+-an available block.
+-the first_ and last_pool_blocks indicate the first and last block in a single
+-segment for each block size. This allows SHMFree() to determine the size of a
+-block by comparing its value with these boundaries. (note that within each
+-segment, each pool is composed of a single contiguous block of memory)
+-*/
+-typedef struct
+-{
+- Volatile<SHMPTR> first_pool_blocks[SPS_LAST];
+- Volatile<SHMPTR> last_pool_blocks[SPS_LAST];
+-} SHM_SEGMENT_HEADER;
+-
+-/*
+-SHM_FIRST_HEADER
++SHM_HEADER
+ Global information about the shared memory system
+-In addition to the standard SHM_SEGGMENT_HEADER, the first segment contains some
+-information required to properly use the shared memory system.
+
+ The spinlock is used to ensure that only one process accesses shared memory at
+ the same time. A process can only take the spinlock if its contents is 0, and
+ it takes the spinlock by placing its PID in it. (this allows a process to catch
+ the special case where it tries to take a spinlock it already owns.
+-
+-The first_* members will contain the location of the first element in the
+-various linked lists of shared information
+- */
+-
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+-
+-#define SHMLOCK_OWNERSHIP_HISTORY_ARRAY_SIZE 5
+-
+-#define CHECK_CANARIES(header) \
+- _ASSERTE(HeadSignature == header->dwHeadCanaries[0]); \
+- _ASSERTE(HeadSignature == header->dwHeadCanaries[1]); \
+- _ASSERTE(TailSignature == header->dwTailCanaries[0]); \
+- _ASSERTE(TailSignature == header->dwTailCanaries[1])
+-
+-typedef struct _pid_and_tid
+-{
+- Volatile<pid_t> pid;
+- Volatile<pthread_t> tid;
+-} pid_and_tid;
+-
+-const DWORD HeadSignature = 0x48454144;
+-const DWORD TailSignature = 0x5441494C;
+-
+-#endif // TRACK_SHMLOCK_OWNERSHIP
++*/
+
+ typedef struct
+ {
+- SHM_SEGMENT_HEADER header;
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- Volatile<DWORD> dwHeadCanaries[2];
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+ Volatile<pid_t> spinlock;
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- Volatile<DWORD> dwTailCanaries[2];
+- pid_and_tid pidtidCurrentOwner;
+- pid_and_tid pidtidOwners[SHMLOCK_OWNERSHIP_HISTORY_ARRAY_SIZE];
+- Volatile<ULONG> ulOwnersIdx;
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+- SHM_POOL_INFO pools[SPS_LAST]; /* information about each memory pool */
+ Volatile<SHMPTR> shm_info[SIID_LAST]; /* basic blocks of shared information.*/
+-} SHM_FIRST_HEADER;
++} SHM_HEADER;
+
++static SHM_HEADER shm_header;
+
+ /* Static variables ***********************************************************/
+
+@@ -343,12 +62,6 @@ memory. Rationale :
+ */
+ static CRITICAL_SECTION shm_critsec;
+
+-/* number of segments the current process knows about */
+-int shm_numsegments;
+-
+-/* array containing the base address of each segment */
+-Volatile<LPVOID> shm_segment_bases[MAX_SEGMENTS];
+-
+ /* number of locks the process currently holds (SHMLock calls without matching
+ SHMRelease). Because we take the critical section while inside a
+ SHMLock/SHMRelease pair, this is actually the number of locks held by a single
+@@ -359,24 +72,6 @@ static Volatile<LONG> lock_count;
+ SHMGet/SetInfo will verify that the calling thread holds the lock */
+ static Volatile<HANDLE> locking_thread;
+
+-/* Constants ******************************************************************/
+-
+-/* size of a single segment : 256KB */
+-static const int segment_size = 0x40000;
+-
+-/* Static function prototypes *************************************************/
+-
+-static SHMPTR SHMInitPool(SHMPTR first, int block_size, int pool_size,
+- SHM_POOL_INFO *pool);
+-static SHMPTR SHMLinkPool(SHMPTR first, int block_size, int num_blocks);
+-static BOOL SHMMapUnknownSegments(void);
+-static BOOL SHMAddSegment(void);
+-
+-
+-#define init_waste()
+-#define log_waste(x,y)
+-#define save_waste()
+-
+ /* Public function implementations ********************************************/
+
+ /*++
+@@ -390,94 +85,18 @@ BOOL SHMInitialize(void)
+ {
+ InternalInitializeCriticalSection(&shm_critsec);
+
+- init_waste();
+-
+- int size;
+- SHM_FIRST_HEADER *header;
+- SHMPTR pool_start;
+- SHMPTR pool_end;
+- enum SHM_POOL_SIZES sps;
+-
+ TRACE("Now initializing global shared memory system\n");
+-
+- // Not really shared in CoreCLR; we don't try to talk to other CoreCLRs.
+- shm_segment_bases[0] = mmap(NULL, segment_size,PROT_READ|PROT_WRITE,
+- MAP_ANON|MAP_PRIVATE, -1, 0);
+- if(shm_segment_bases[0] == MAP_FAILED)
+- {
+- ERROR("mmap() failed; error is %d (%s)\n", errno, strerror(errno));
+- return FALSE;
+- }
+- TRACE("Mapped first SHM segment at %p\n",shm_segment_bases[0].Load());
+-
+- /* Initialize first segment's header */
+- header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+
+- InterlockedExchange((LONG *)&header->spinlock, 0);
+-
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- header->dwHeadCanaries[0] = HeadSignature;
+- header->dwHeadCanaries[1] = HeadSignature;
+- header->dwTailCanaries[0] = TailSignature;
+- header->dwTailCanaries[1] = TailSignature;
+-
+- // Check spinlock size
+- _ASSERTE(sizeof(DWORD) == sizeof(header->spinlock));
+- // Check spinlock alignment
+- _ASSERTE(0 == ((DWORD_PTR)&header->spinlock % (DWORD_PTR)sizeof(void *)));
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+-
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- header->pidtidCurrentOwner.pid = 0;
+- header->pidtidCurrentOwner.tid = 0;
+- memset((void *)header->pidtidOwners, 0, sizeof(header->pidtidOwners));
+- header->ulOwnersIdx = 0;
+-#endif // TRACK_SHMLOCK_OWNERSHIP
++ InterlockedExchange((LONG *)&shm_header.spinlock, 0);
+
+ /* SHM information array starts with NULLs */
+- memset((void *)header->shm_info, 0, SIID_LAST*sizeof(SHMPTR));
+-
+- /* Initialize memory pools */
+-
+- /* first pool starts right after header */
+- pool_start = roundup(sizeof(SHM_FIRST_HEADER), sizeof(INT64));
+-
+- /* Same size for each pool, ensuring alignment is correct */
+- size = ((segment_size-pool_start)/SPS_LAST) & ~(sizeof(INT64)-1);
+-
+- for (sps = static_cast<SHM_POOL_SIZES>(0); sps < SPS_LAST;
+- sps = static_cast<SHM_POOL_SIZES>(sps + 1))
+- {
+- pool_end = SHMInitPool(pool_start, block_sizes[sps], size,
+- (SHM_POOL_INFO *)&header->pools[sps]);
+-
+- if(pool_end ==0)
+- {
+- ERROR("SHMInitPool failed.\n");
+- munmap(shm_segment_bases[0],segment_size);
+- return FALSE;
+- }
+- /* save first and last element of each pool for this segment */
+- header->header.first_pool_blocks[sps] = pool_start;
+- header->header.last_pool_blocks[sps] = pool_end;
+-
+- /* next pool starts immediately after this one */
+- pool_start +=size;
+- }
++ memset((void *)shm_header.shm_info, 0, SIID_LAST*sizeof(SHMPTR));
+
+ TRACE("Global shared memory initialization complete.\n");
+
+- shm_numsegments = 1;
+ lock_count = 0;
+ locking_thread = 0;
+
+- /* hook into all SHM segments */
+- if(!SHMMapUnknownSegments())
+- {
+- ERROR("Error while mapping segments!\n");
+- SHMCleanup();
+- return FALSE;
+- }
+ return TRUE;
+ }
+
+@@ -494,7 +113,6 @@ in PALCommonCleanup.
+ --*/
+ void SHMCleanup(void)
+ {
+- SHM_FIRST_HEADER *header;
+ pid_t my_pid;
+
+ TRACE("Starting shared memory cleanup\n");
+@@ -505,9 +123,8 @@ void SHMCleanup(void)
+ /* We should not be holding the spinlock at this point. If we are, release
+ the spinlock. by setting it to 0 */
+ my_pid = gPID;
+- header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+
+- _ASSERT_MSG(header->spinlock != my_pid,
++ _ASSERT_MSG(shm_header.spinlock != my_pid,
+ "SHMCleanup called while the current process still owns the lock "
+ "[owner thread=%u, current thread: %u]\n",
+ locking_thread.Load(), THREADSilentGetCurrentThreadId());
+@@ -515,237 +132,10 @@ void SHMCleanup(void)
+ /* Now for the interprocess stuff. */
+ DeleteCriticalSection(&shm_critsec);
+
+-
+- /* Unmap memory segments */
+- while(shm_numsegments)
+- {
+- shm_numsegments--;
+- if ( -1 == munmap( shm_segment_bases[ shm_numsegments ],
+- segment_size ) )
+- {
+- ASSERT( "munmap() failed; errno is %d (%s).\n",
+- errno, strerror( errno ) );
+- }
+- }
+-
+- save_waste();
+ TRACE("SHMCleanup complete!\n");
+ }
+
+ /*++
+-SHMalloc
+-
+-Allocate a block of memory of the specified size
+-
+-Parameters :
+- size_t size : size of block required
+-
+-Return value :
+- A SHMPTR identifying the new block, or 0 on failure. Use SHMPTR_TO_PTR to
+- convert a SHMPTR into a useable pointer (but remember to lock the shared
+- memory first!)
+-
+-Notes :
+- SHMalloc will fail if the requested size is larger than a certain maximum.
+- At the moment, the maximum is 520 bytes (MAX_LONGPATH*2).
+---*/
+-SHMPTR SHMalloc(size_t size)
+-{
+- enum SHM_POOL_SIZES sps;
+- SHMPTR first_free;
+- SHMPTR next_free;
+- SHM_FIRST_HEADER *header;
+- SHMPTR *shmptr_ptr;
+-
+- TRACE("SHMalloc() called; requested size is %u\n", size);
+-
+- if(0 == size)
+- {
+- WARN("Got a request for a 0-byte block! returning 0\n");
+- return 0;
+- }
+-
+- /* Find the first block size >= requested size */
+- for (sps = static_cast<SHM_POOL_SIZES>(0); sps < SPS_LAST;
+- sps = static_cast<SHM_POOL_SIZES>(sps + 1))
+- {
+- if (size <= static_cast<size_t>(block_sizes[sps]))
+- {
+- break;
+- }
+- }
+-
+- /* If no block size is found, requested size was too large. */
+- if( SPS_LAST == sps )
+- {
+- ASSERT("Got request for shared memory block of %u bytes; maximum block "
+- "size is %d.\n", size, block_sizes[SPS_LAST-1]);
+- return 0;
+- }
+-
+- TRACE("Best block size is %d (%d bytes wasted)\n",
+- block_sizes[sps], block_sizes[sps]-size );
+-
+- log_waste(sps, block_sizes[sps]-size);
+-
+- SHMLock();
+- header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+-
+- /* If there are no free items of the specified size left, it's time to
+- allocate a new shared memory segment.*/
+- if(header->pools[sps].free_items == 0)
+- {
+- TRACE("No blocks of %d bytes left; allocating new segment.\n",
+- block_sizes[sps]);
+- if(!SHMAddSegment())
+- {
+- ERROR("Unable to allocate new shared memory segment!\n");
+- SHMRelease();
+- return 0;
+- }
+- }
+-
+- /* Remove the first free block from the pool */
+- first_free = header->pools[sps].first_free;
+- shmptr_ptr = static_cast<SHMPTR*>(SHMPTR_TO_PTR(first_free));
+-
+- if( 0 == first_free )
+- {
+- ASSERT("First free block in %d-byte pool (%08x) was invalid!\n",
+- block_sizes[sps], first_free);
+- SHMRelease();
+- return 0;
+- }
+-
+- /* the block "first_free" is the head of a linked list of free blocks;
+- take the next link in the list and set it as new head of list. */
+- next_free = *shmptr_ptr;
+- header->pools[sps].first_free = next_free;
+- header->pools[sps].free_items--;
+-
+- /* make sure we're still in a sane state */
+- if(( 0 == header->pools[sps].free_items && 0 != next_free) ||
+- ( 0 != header->pools[sps].free_items && 0 == next_free))
+- {
+- ASSERT("free block count is %d, but next free block is %#x\n",
+- header->pools[sps].free_items, next_free);
+- /* assume all remaining blocks in the pool are corrupt */
+- header->pools[sps].first_free = 0;
+- header->pools[sps].free_items = 0;
+- }
+- else if (0 != next_free && 0 == SHMPTR_TO_PTR(next_free) )
+- {
+- ASSERT("Next free block (%#x) in %d-byte pool is invalid!\n",
+- next_free, block_sizes[sps]);
+- /* assume all remaining blocks in the pool are corrupt */
+- header->pools[sps].first_free = 0;
+- header->pools[sps].free_items = 0;
+- }
+-
+- SHMRelease();
+-
+- TRACE("Allocation successful; %d blocks of %d bytes left. Returning %08x\n",
+- header->pools[sps].free_items, block_sizes[sps], first_free);
+- return first_free;
+-}
+-
+-/*++
+-SHMfree
+-
+-Release a block of shared memory and put it back in the shared memory pool
+-
+-Parameters :
+- SHMPTR shmptr : identifier of block to release
+-
+-(no return value)
+---*/
+-void SHMfree(SHMPTR shmptr)
+-{
+- int segment;
+- int offset;
+- SHM_SEGMENT_HEADER *header;
+- SHM_FIRST_HEADER *first_header;
+- enum SHM_POOL_SIZES sps;
+- SHMPTR *shmptr_ptr;
+-
+- if(0 == shmptr)
+- {
+- WARN("can't SHMfree() a NULL SHMPTR!\n");
+- return;
+- }
+- SHMLock();
+-
+- TRACE("Releasing SHMPTR 0x%08x\n", shmptr);
+-
+- shmptr_ptr = static_cast<SHMPTR*>(SHMPTR_TO_PTR(shmptr));
+-
+- if(!shmptr_ptr)
+- {
+- ASSERT("Tried to free an invalid shared memory pointer 0x%08x\n", shmptr);
+- SHMRelease();
+- return;
+- }
+-
+- /* note : SHMPTR_TO_PTR has already validated the segment/offset pair */
+- segment = SHMPTR_SEGMENT(shmptr);
+- header = (SHM_SEGMENT_HEADER *)shm_segment_bases[segment].Load();
+-
+- /* Find out the size of this block. Each segment tells where are its first
+- and last blocks for each block size, so we simply need to check in which
+- interval the block fits */
+- for (sps = static_cast<SHM_POOL_SIZES>(0); sps < SPS_LAST;
+- sps = static_cast<SHM_POOL_SIZES>(sps + 1))
+- {
+- if(header->first_pool_blocks[sps]<=shmptr &&
+- header->last_pool_blocks[sps]>=shmptr)
+- {
+- break;
+- }
+- }
+-
+- /* If we didn't find an interval, then the block doesn't really belong in
+- this segment (shouldn't happen, the offset check in SHMPTR_TO_PTR should
+- have caught this.) */
+- if(sps == SPS_LAST)
+- {
+- ASSERT("Shared memory pointer 0x%08x is out of bounds!\n", shmptr);
+- SHMRelease();
+- return;
+- }
+-
+- TRACE("SHMPTR 0x%08x is a %d-byte block located in segment %d\n",
+- shmptr, block_sizes[sps], segment);
+-
+- /* Determine the offset of this block (in bytes) relative to the first
+- block of the same size in this segment */
+- offset = shmptr - header->first_pool_blocks[sps];
+-
+- /* Make sure that the offset is a multiple of the block size; otherwise,
+- this isn't a real SHMPTR */
+- if( 0 != ( offset % block_sizes[sps] ) )
+- {
+- ASSERT("Shared memory pointer 0x%08x is misaligned!\n", shmptr);
+- SHMRelease();
+- return;
+- }
+-
+- /* Put the SHMPTR back in its pool. */
+- first_header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+-
+- /* first_free is the head of a linked list of free SHMPTRs. All we need to
+- do is make shmptr point to first_free, and set shmptr as the new head
+- of the list. */
+- *shmptr_ptr = first_header->pools[sps].first_free;
+- first_header->pools[sps].first_free = shmptr;
+- first_header->pools[sps].free_items++;
+-
+- TRACE("SHMPTR 0x%08x released; there are now %d blocks of %d bytes "
+- "available\n", shmptr, first_header->pools[sps].free_items,
+- block_sizes[sps]);
+- SHMRelease();
+-}
+-
+-/*++
+ SHMLock
+
+ Restrict shared memory access to the current thread of the current process
+@@ -769,17 +159,11 @@ int SHMLock(void)
+
+ if(lock_count == 0)
+ {
+- SHM_FIRST_HEADER *header;
+ pid_t my_pid, tmp_pid;
+ int spincount = 1;
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- ULONG ulIdx;
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+
+ TRACE("First-level SHM lock : taking spinlock\n");
+
+- header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+-
+ // Store the id of the current thread as the (only) one that is
+ // trying to grab the spinlock from the current process
+ locking_thread = (HANDLE)pthread_self();
+@@ -788,21 +172,10 @@ int SHMLock(void)
+
+ while(TRUE)
+ {
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- _ASSERTE(0 != my_pid);
+- _ASSERTE(getpid() == my_pid);
+- _ASSERTE(my_pid != header->spinlock);
+- CHECK_CANARIES(header);
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+-
+ //
+ // Try to grab the spinlock
+ //
+- tmp_pid = InterlockedCompareExchange((LONG *) &header->spinlock, my_pid,0);
+-
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- CHECK_CANARIES(header);
+-#endif // TRACK_SHMLOCK_OWNERSHIP
++ tmp_pid = InterlockedCompareExchange((LONG *) &shm_header.spinlock, my_pid,0);
+
+ if (0 == tmp_pid)
+ {
+@@ -821,7 +194,7 @@ int SHMLock(void)
+ TRACE("SHM spinlock owner (%08x) is dead; releasing its lock\n",
+ tmp_pid);
+
+- InterlockedCompareExchange((LONG *) &header->spinlock, 0, tmp_pid);
++ InterlockedCompareExchange((LONG *) &shm_header.spinlock, 0, tmp_pid);
+ }
+ else
+ {
+@@ -856,31 +229,15 @@ int SHMLock(void)
+ spincount++;
+ }
+
+- _ASSERT_MSG(my_pid == header->spinlock,
++ _ASSERT_MSG(my_pid == shm_header.spinlock,
+ "\n(my_pid = %u) != (header->spinlock = %u)\n"
+ "tmp_pid = %u\n"
+ "spincount = %d\n"
+ "locking_thread = %u\n",
+- (DWORD)my_pid, (DWORD)header->spinlock,
++ (DWORD)my_pid, (DWORD)shm_header.spinlock,
+ (DWORD)tmp_pid,
+ (int)spincount,
+ (HANDLE)locking_thread);
+-
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- _ASSERTE(0 == header->pidtidCurrentOwner.pid);
+- _ASSERTE(0 == header->pidtidCurrentOwner.tid);
+-
+- header->pidtidCurrentOwner.pid = my_pid;
+- header->pidtidCurrentOwner.tid = locking_thread;
+-
+- ulIdx = header->ulOwnersIdx % (sizeof(header->pidtidOwners) / sizeof(header->pidtidOwners[0]));
+-
+- header->pidtidOwners[ulIdx].pid = my_pid;
+- header->pidtidOwners[ulIdx].tid = locking_thread;
+-
+- header->ulOwnersIdx += 1;
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+-
+ }
+
+ lock_count++;
+@@ -919,32 +276,15 @@ int SHMRelease(void)
+ set the spinlock back to 0. */
+ if(lock_count == 0)
+ {
+- SHM_FIRST_HEADER *header;
+ pid_t my_pid, tmp_pid;
+
+ TRACE("Releasing first-level SHM lock : resetting spinlock\n");
+
+ my_pid = gPID;
+-
+- header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+-
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- CHECK_CANARIES(header);
+- _ASSERTE(0 != my_pid);
+- _ASSERTE(getpid() == my_pid);
+- _ASSERTE(my_pid == header->spinlock);
+- _ASSERTE(header->pidtidCurrentOwner.pid == my_pid);
+- _ASSERTE(pthread_self() == header->pidtidCurrentOwner.tid);
+- _ASSERTE((pthread_t)locking_thread == header->pidtidCurrentOwner.tid);
+-
+- header->pidtidCurrentOwner.pid = 0;
+- header->pidtidCurrentOwner.tid = 0;
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+-
+
+ /* Make sure we don't touch the spinlock if we don't own it. We're
+ supposed to own it if we get here, but just in case... */
+- tmp_pid = InterlockedCompareExchange((LONG *) &header->spinlock, 0, my_pid);
++ tmp_pid = InterlockedCompareExchange((LONG *) &shm_header.spinlock, 0, my_pid);
+
+ if (tmp_pid != my_pid)
+ {
+@@ -956,10 +296,6 @@ int SHMRelease(void)
+
+ /* indicate no thread (in this process) holds the SHM lock */
+ locking_thread = 0;
+-
+-#ifdef TRACK_SHMLOCK_OWNERSHIP
+- CHECK_CANARIES(header);
+-#endif // TRACK_SHMLOCK_OWNERSHIP
+ }
+
+ TRACE("SHM lock level is now %d\n", lock_count.Load());
+@@ -974,99 +310,6 @@ int SHMRelease(void)
+ }
+
+ /*++
+-SHMPtrToPtr
+-
+-Convert a SHMPTR value to a valid pointer within the address space of the
+-current process
+-
+-Parameters :
+- SHMPTR shmptr : SHMPTR value to convert into a pointer
+-
+-Return value :
+- Address corresponding to the given SHMPTR, valid for the current process
+-
+-Notes :
+-(see notes for SHMPTR_SEGMENT macro for details on SHMPTR structure)
+-
+-It is possible for the segment index to be greater than the known total number
+-of segments (shm_numsegments); this means that the SHMPTR points to a memory
+-block in a shared memory segment this process doesn't know about. In this case,
+-we must obtain an address for that new segment and add it to our array
+-(see SHMMapUnknownSegments for details)
+-
+-In the simplest case (no need to map new segments), there is no need to hold
+-the lock, since we don't access any information that can change
+---*/
+-LPVOID SHMPtrToPtr(SHMPTR shmptr)
+-{
+- void *retval;
+- int segment;
+- int offset;
+-
+- TRACE("Converting SHMPTR 0x%08x to a valid pointer...\n", shmptr);
+- if(!shmptr)
+- {
+- WARN("Got SHMPTR \"0\"; returning NULL pointer\n");
+- return NULL;
+- }
+-
+- segment = SHMPTR_SEGMENT(shmptr);
+-
+- /* If segment isn't known, it may have been added by another process. We
+- need to map all new segments into our address space. */
+- if(segment>= shm_numsegments)
+- {
+- TRACE("SHMPTR is in segment %d, we know only %d. We must now map all "
+- "unknowns.\n", segment, shm_numsegments);
+- SHMMapUnknownSegments();
+-
+- /* if segment is still unknown, then it doesn't exist */
+- if(segment>=shm_numsegments)
+- {
+- ASSERT("Segment %d still unknown; returning NULL\n", segment);
+- return NULL;
+- }
+- TRACE("Segment %d found; continuing\n", segment);
+- }
+-
+- /* Make sure the offset doesn't point outside the segment */
+- offset = SHMPTR_OFFSET(shmptr);
+- if(offset>=segment_size)
+- {
+- ASSERT("Offset %d is larger than segment size (%d)! returning NULL\n",
+- offset, segment_size);
+- return NULL;
+-
+- }
+-
+- /* Make sure the offset doesn't point in the segment's header */
+- if(segment == 0)
+- {
+- if (static_cast<size_t>(offset) < roundup(sizeof(SHM_FIRST_HEADER), sizeof(INT64)))
+- {
+- ASSERT("Offset %d is in segment header! returning NULL\n", offset);
+- return NULL;
+- }
+- }
+- else
+- {
+- if (static_cast<size_t>(offset) < sizeof(SHM_SEGMENT_HEADER))
+- {
+- ASSERT("Offset %d is in segment header! returning NULL\n", offset);
+- return NULL;
+- }
+- }
+-
+- retval = shm_segment_bases[segment];
+- retval = static_cast<BYTE*>(retval) + offset;
+-
+- TRACE("SHMPTR %#x is at offset %d in segment %d; maps to address %p\n",
+- shmptr, offset, segment, retval);
+- return retval;
+-}
+-
+-
+-/*++
+ Function :
+ SHMGetInfo
+
+@@ -1083,7 +326,6 @@ Notes :
+ --*/
+ SHMPTR SHMGetInfo(SHM_INFO_ID element)
+ {
+- SHM_FIRST_HEADER *header = NULL;
+ SHMPTR retval = 0;
+
+ if(element < 0 || element >= SIID_LAST)
+@@ -1099,9 +341,7 @@ SHMPTR SHMGetInfo(SHM_INFO_ID element)
+ ASSERT("SHMGetInfo called while thread does not hold the SHM lock!\n");
+ }
+
+- header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+-
+- retval = header->shm_info[element];
++ retval = shm_header.shm_info[element];
+
+ TRACE("SHM info element %d is %08x\n", element, retval );
+ return retval;
+@@ -1126,8 +366,6 @@ Notes :
+ --*/
+ BOOL SHMSetInfo(SHM_INFO_ID element, SHMPTR value)
+ {
+- SHM_FIRST_HEADER *header;
+-
+ if(element < 0 || element >= SIID_LAST)
+ {
+ ASSERT("Invalid SHM info element %d\n", element);
+@@ -1141,558 +379,10 @@ BOOL SHMSetInfo(SHM_INFO_ID element, SHMPTR value)
+ ASSERT("SHMGetInfo called while thread does not hold the SHM lock!\n");
+ }
+
+- header = (SHM_FIRST_HEADER*)shm_segment_bases[0].Load();
+-
+ TRACE("Setting SHM info element %d to %08x; used to be %08x\n",
+- element, value, header->shm_info[element].Load() );
++ element, value, shm_header.shm_info[element].Load() );
+
+- header->shm_info[element] = value;
++ shm_header.shm_info[element] = value;
+
+ return TRUE;
+-}
+-
+-
+-/* Static function implementations ********************************************/
+-
+-/*++
+-SHMInitPool
+-
+-Perform one-time initialization for a shared memory pool.
+-
+-Parameters :
+- SHMPTR first : SHMPTR of first memory block in the pool
+- int block_size : size (in bytes) of a memory block in this pool
+- int pool_size : total size (in bytes) of this pool
+- SHM_POOL_INFO *pool : pointer to initialize with information about the pool
+-
+-Return value :
+- SHMPTR of last memory block in the pool
+-
+-Notes :
+-This function is used to initialize the memory pools of the first SHM segment.
+-In addition to creating a linked list of SHMPTRs, it initializes the given
+-SHM_POOL_INFO based on the given information.
+---*/
+-static SHMPTR SHMInitPool(SHMPTR first, int block_size, int pool_size,
+- SHM_POOL_INFO *pool)
+-{
+- int num_blocks;
+- SHMPTR last;
+-
+- TRACE("Initializing SHM pool for %d-byte blocks\n", block_size);
+-
+- /* Number of memory blocks of size "block_size" that can fit in "pool_size"
+- bytes (rounded down) */
+- num_blocks = pool_size/block_size;
+-
+- /* Create the initial linked list of free blocks */
+- last = SHMLinkPool(first, block_size, num_blocks);
+- if( 0 == last )
+- {
+- ERROR("Failed to create linked list of free blocks!\n");
+- return 0;
+- }
+-
+- /* Initialize SHM_POOL_INFO */
+- pool->first_free = first;
+- pool->free_items = num_blocks;
+- pool->item_size = block_size;
+- pool->num_items = num_blocks;
+-
+- TRACE("New SHM pool extends from SHMPTR 0x%08x to 0x%08x\n", first, last);
+- return last;
+-}
+-
+-/*++
+-SHMLinkPool
+-
+-Joins contiguous blocks of memory into a linked list..
+-
+-Parameters :
+- SHMPTR first : First SHMPTR in the memory pool; first link in the list
+- int block_size : size (in bytes) of the memory blocks
+- int num_blocks : number of contiguous blocks to link
+-
+-Return value :
+- SHMPTR of last memory block in the pool
+-
+-Notes :
+-The linked list is created by saving the value of the next SHMPTR in the list
+-in the memory location corresponding to the previous SHMPTR :
+-*(SHMPTR *)SHMPTR_TO_PTR(previous) = previous + block_size
+---*/
+-static SHMPTR SHMLinkPool(SHMPTR first, int block_size, int num_blocks)
+-{
+- LPBYTE item_ptr;
+- SHMPTR *shmptr_ptr;
+- SHMPTR next_shmptr;
+- int i;
+-
+- TRACE("Linking %d blocks of %d bytes, starting at 0x%08x\n",
+- num_blocks, block_size, first);
+-
+- item_ptr = static_cast<LPBYTE>(
+- static_cast<LPBYTE>(shm_segment_bases[SHMPTR_SEGMENT(first)].Load()) +
+- (SHMPTR_OFFSET(first)));
+- next_shmptr = first/*+block_size*/;
+-
+- /* Link blocks together */
+- for(i=0; i<num_blocks; i++)
+- {
+- next_shmptr += block_size;
+-
+- /* item_ptr is char * (so we can increment with +=blocksize), we cast
+- it to a SHMPTR * and set its content to the next SHMPTR in the list*/
+- shmptr_ptr = (SHMPTR *)item_ptr;
+- *shmptr_ptr = next_shmptr;
+-
+- item_ptr+=block_size;
+- }
+- /* Last SHMPTR in the list must point to NULL */
+- item_ptr-=block_size;
+- shmptr_ptr = (SHMPTR *)item_ptr;
+- *shmptr_ptr = 0;
+-
+- /* Return SHMPTR of last element in the list */
+- next_shmptr -= block_size;
+-
+- TRACE("New linked pool goes from 0x%08x to 0x%08x\n", first, next_shmptr);
+- return next_shmptr;
+-}
+-
+-/*++
+-SHMMapUnknownSegments
+-
+-Map into this process all SHM segments not yet mapped
+-
+-(no parameters)
+-
+-Return value :
+- TRUE on success, FALSE in case of error
+---*/
+-static BOOL SHMMapUnknownSegments(void)
+-{
+- return TRUE;
+-}
+-
+-/*++
+-SHMAddSegment
+-
+-Create a new SHM segment, map it into this process, initialize it, then link it
+-to the other SHM segments
+-
+-(no parameters)
+-
+-Return value :
+- TRUE on success, FALSE in case of error
+-
+-Notes :
+- This function assumes the SHM lock is held.
+---*/
+-static BOOL SHMAddSegment(void)
+-{
+- LPVOID segment_base;
+- SHM_SEGMENT_HEADER *header;
+- SHM_FIRST_HEADER *first_header;
+- SHMPTR first_shmptr;
+- SHMPTR *shmptr_ptr;
+- int sps;
+- int used_size;
+- int new_size;
+- int current_pool_size;
+- int used_pool_size;
+- int new_pool_size;
+- int num_new_items;
+-
+- /* Map all segments this process doesn't yet know about, so we link the new
+- segment at the right place */
+- if(!SHMMapUnknownSegments())
+- {
+- ERROR("SHMMapUnknownSegments failed!\n");
+- return FALSE;
+- }
+-
+- /* Avoid overflowing */
+- if(shm_numsegments == MAX_SEGMENTS)
+- {
+- ERROR("Can't map more segments : maximum number (%d) reached!\n",
+- MAX_SEGMENTS);
+- return FALSE;
+- }
+-
+- TRACE("Creating SHM segment #%d\n", shm_numsegments);
+-
+- segment_base = mmap(NULL, segment_size, PROT_READ|PROT_WRITE,
+- MAP_ANON|MAP_PRIVATE,-1, 0);
+-
+- if(segment_base == MAP_FAILED)
+- {
+- ERROR("mmap() failed! error is %d (%s)\n", errno, strerror(errno));
+- return FALSE;
+- }
+-
+- shm_segment_bases[shm_numsegments] = segment_base;
+-
+- /* Save name (well, suffix) of new segment in the header of the old last
+- segment, so that other processes know where it is. */
+- header = (SHM_SEGMENT_HEADER *)shm_segment_bases[shm_numsegments-1].Load();
+-
+- /* Indicate that the new segment is the last one */
+- header = (SHM_SEGMENT_HEADER *)segment_base;
+-
+- /* We're now ready to update our memory pools */
+-
+- first_header = (SHM_FIRST_HEADER *)shm_segment_bases[0].Load();
+-
+- /* Calculate total amount of used memory (in bytes) */
+- used_size = 0;
+- for(sps = 0; sps<SPS_LAST;sps++)
+- {
+- /* Add total size of this pool */
+- used_size += first_header->pools[sps].num_items*block_sizes[sps];
+-
+- /* Remove unused size of this pool */
+- used_size -= first_header->pools[sps].free_items*block_sizes[sps];
+- }
+-
+- /* Determine how to divide the new segment between the pools for the
+- different block sizes, then update the pool inforamtion accordingly
+- Allocation strategy :
+- 1) Calculate the proportion of used memory used by each pool
+- 2) Allocate this proportion of the new segment to each pool
+- */
+-
+- /* Add the new segment to the total amount of SHM memory */
+- new_size = segment_size-roundup(sizeof(SHM_SEGMENT_HEADER), sizeof(INT64));
+-
+- /* Calculate value of first SHMPTR in the new segment : segment is
+- shm_numsegments (not yet incremented); offset is the first byte after
+- the segment header */
+- first_shmptr = MAKE_SHMPTR(shm_numsegments,roundup(sizeof(SHM_SEGMENT_HEADER), sizeof(INT64)));
+-
+- TRACE("Updating SHM pool information; Total memory used is %d bytes; "
+- "we are adding %d bytes\n", used_size, new_size);
+-
+- /* We want to allocate at least 1 block of each size (to avoid adding
+- special cases everywhere). We remove the required space for these blocks
+- from the size used in the calculations, then add 1 to each block count */
+- for(sps=0;sps<SPS_LAST;sps++)
+- new_size -= block_sizes[sps];
+-
+- /* Loop through all block sizes */
+- for(sps=0; sps<SPS_LAST; sps++)
+- {
+- TRACE("Now processing block size \"%d\"...\n", block_sizes[sps]);
+- /* amount of memory currently reserved for this block size */
+- current_pool_size = first_header->pools[sps].num_items*block_sizes[sps];
+-
+- /* how much of that is actually used? */
+- used_pool_size = current_pool_size -
+- first_header->pools[sps].free_items*block_sizes[sps];
+-
+- DBGOUT("%d bytes of %d bytes used (%d%%)\n", used_pool_size,
+- current_pool_size, (used_pool_size*100)/current_pool_size);
+-
+- /* amount of memory we want to add to the pool for this block size :
+- amount used by this pool/total amount used * new segment's size */
+- new_pool_size = (((LONGLONG)used_pool_size)*new_size)/used_size;
+-
+- DBGOUT("Allocating %d bytes of %d to %d-byte pool\n",
+- new_pool_size, new_size, block_sizes[sps]);
+-
+- /* determine the number of blocks that can fit in the chosen amount */
+- num_new_items = new_pool_size/block_sizes[sps];
+-
+- /* make sure we allocate at least 1 block of each size */
+- num_new_items +=1;
+-
+- DBGOUT("Adding %d new blocks\n", num_new_items);
+-
+- /* Save the first and last block of the current block size in the new
+- segment; join all blocks in between in a linked list */
+- header->first_pool_blocks[sps] = first_shmptr;
+- header->last_pool_blocks[sps] = SHMLinkPool(first_shmptr,
+- block_sizes[sps],
+- num_new_items);
+-
+- /* Link the last block in the new linked list to the first block of the
+- old global linked list. We don't use SHMPTR_TO_PTR because the pool
+- data isn't updated yet */
+- shmptr_ptr = reinterpret_cast<SHMPTR*>(
+- static_cast<LPBYTE>(shm_segment_bases[SHMPTR_SEGMENT(header->last_pool_blocks[sps])].Load()) +
+- SHMPTR_OFFSET(header->last_pool_blocks[sps]));
+-
+- *shmptr_ptr = first_header->pools[sps].first_free;
+-
+- /* Save the first block of the new linked list as the new beginning of
+- the global linked list; the global list now contains all new blocks
+- AND all blocks that were already free */
+- first_header->pools[sps].first_free = header->first_pool_blocks[sps];
+-
+- /* Update block counts to include new blocks */
+- first_header->pools[sps].free_items+=num_new_items;
+- first_header->pools[sps].num_items+=num_new_items;
+-
+- DBGOUT("There are now %d %d-byte blocks, %d are free\n",
+- first_header->pools[sps].num_items, block_sizes[sps],
+- first_header->pools[sps].free_items);
+-
+- /* Update first_shmptr to first byte after the new pool */
+- first_shmptr+=num_new_items*block_sizes[sps];
+- }
+- shm_numsegments++;
+-
+- return TRUE;
+-}
+-
+-/*++
+-SHMStrDup
+-
+-Duplicates the string in shared memory.
+-
+-Returns the new address as SHMPTR on success.
+-Returns (SHMPTR)NULL on failure.
+---*/
+-SHMPTR SHMStrDup( LPCSTR string )
+-{
+- UINT length = 0;
+- SHMPTR retVal = 0;
+-
+- if ( string )
+- {
+- length = strlen( string );
+-
+- retVal = SHMalloc( ++length );
+-
+- if ( retVal != 0 )
+- {
+- LPVOID ptr = SHMPTR_TO_PTR( retVal );
+- _ASSERT_MSG(ptr != NULL, "SHMPTR_TO_PTR returned NULL.\n");
+- if (ptr != NULL)
+- {
+- memcpy( ptr, string, length );
+- }
+- else
+- {
+- // This code should never be reached. If a valid pointer
+- // is passed to SHMPTR_TO_PTR and NULL is returned, then
+- // there's a problem in either the macro, or the underlying
+- // call to SHMPtrToPtr. In case the impossible happens,
+- // though, free the memory and return NULL rather than
+- // returning uninitialized memory.
+- SHMfree( retVal );
+- retVal = NULL;
+- }
+- }
+- }
+- return retVal;
+-}
+-
+-/*++
+-SHMWStrDup
+-
+-Duplicates the wide string in shared memory.
+-
+-Returns the new address as SHMPTR on success.
+-Returns (SHMPTR)NULL on failure.
+---*/
+-SHMPTR SHMWStrDup( LPCWSTR string )
+-{
+- UINT length = 0;
+- SHMPTR retVal = 0;
+-
+- if ( string )
+- {
+- length = ( PAL_wcslen( string ) + 1 ) * sizeof( WCHAR );
+-
+- retVal = SHMalloc( length );
+-
+- if ( retVal != 0 )
+- {
+- LPVOID ptr = SHMPTR_TO_PTR(retVal);
+- _ASSERT_MSG(ptr != NULL, "SHMPTR_TO_PTR returned NULL.\n");
+- if (ptr != NULL)
+- {
+- memcpy( ptr, string, length );
+- }
+- else
+- {
+- // This code should never be reached. If a valid pointer
+- // is passed to SHMPTR_TO_PTR and NULL is returned, then
+- // there's a problem in either the macro, or the underlying
+- // call to SHMPtrToPtr. In case the impossible happens,
+- // though, free the memory and return NULL rather than
+- // returning uninitialized memory.
+- SHMfree( retVal );
+- retVal = NULL;
+- }
+- }
+- }
+- return retVal;
+-}
+-
+-
+-
+-/*++
+-SHMFindNamedObjectByName
+-
+-Searches for an object whose name matches the name and ID passed in.
+-
+-Returns a SHMPTR to its location in shared memory. If no object
+-matches the name, the function returns NULL and sets pbNameExists to FALSE.
+-If an object matches the name but is of a different type, the function
+-returns NULL and sets pbNameExists to TRUE.
+-
+---*/
+-SHMPTR SHMFindNamedObjectByName( LPCWSTR lpName, SHM_NAMED_OBJECTS_ID oid,
+- BOOL *pbNameExists )
+-{
+- PSHM_NAMED_OBJECTS pNamedObject = NULL;
+- SHMPTR shmNamedObject = 0;
+- LPWSTR object_name = NULL;
+-
+- if(oid==SHM_NAMED_LAST)
+- {
+- ASSERT("Invalid named object type.\n");
+- return 0;
+- }
+-
+- if (pbNameExists == NULL)
+- {
+- ASSERT("pbNameExists must be non-NULL.\n");
+- }
+-
+- SHMLock();
+-
+- *pbNameExists = FALSE;
+- shmNamedObject = SHMGetInfo( SIID_NAMED_OBJECTS );
+-
+- TRACE( "Entering SHMFindNamedObjectByName looking for %S .\n",
+- lpName?lpName:W16_NULLSTRING );
+-
+- while ( shmNamedObject )
+- {
+- pNamedObject = (PSHM_NAMED_OBJECTS)SHMPTR_TO_PTR( shmNamedObject );
+- if(NULL == pNamedObject)
+- {
+- ASSERT("Got invalid SHMPTR value; list of named objects is "
+- "corrupted.\n");
+- break;
+- }
+-
+- if ( pNamedObject->ShmObjectName )
+- {
+- object_name = (LPWSTR)SHMPTR_TO_PTR( pNamedObject->ShmObjectName );
+- }
+-
+- if ( object_name &&
+- PAL_wcscmp( lpName, object_name ) == 0 )
+- {
+- if(oid == pNamedObject->ObjectType)
+- {
+- TRACE( "Returning the kernel object %p.\n", pNamedObject );
+- }
+- else
+- {
+- shmNamedObject = 0;
+- *pbNameExists = TRUE;
+- }
+- goto Exit;
+- }
+- shmNamedObject = pNamedObject->ShmNext;
+- }
+-
+- shmNamedObject = 0;
+- TRACE( "No matching kernel object was found.\n" );
+-
+-Exit:
+- SHMRelease();
+- return shmNamedObject;
+-
+-}
+-
+-/*++
+-SHMRemoveNamedObject
+-
+-Removes the specified named object from the list
+-
+-No return.
+-
+-note : the caller is reponsible for releasing all associated memory
+---*/
+-void SHMRemoveNamedObject( SHMPTR shmNamedObject )
+-{
+- PSHM_NAMED_OBJECTS pshmLast = 0;
+- PSHM_NAMED_OBJECTS pshmCurrent = 0;
+-
+- TRACE( "Entered SHMDeleteNamedObject shmNamedObject = %d\n", shmNamedObject );
+- SHMLock();
+-
+- pshmCurrent =
+- (PSHM_NAMED_OBJECTS)SHMPTR_TO_PTR( SHMGetInfo( SIID_NAMED_OBJECTS ) );
+- pshmLast = pshmCurrent;
+-
+- while ( pshmCurrent )
+- {
+- if ( pshmCurrent->ShmSelf == shmNamedObject )
+- {
+- TRACE( "Patching the list.\n" );
+-
+- /* Patch the list, and delete the object. */
+- if ( pshmLast->ShmSelf == pshmCurrent->ShmSelf )
+- {
+- /* Either the first element or no elements left. */
+- SHMSetInfo( SIID_NAMED_OBJECTS, pshmCurrent->ShmNext );
+- }
+- else if ( (PSHM_NAMED_OBJECTS)SHMPTR_TO_PTR( pshmCurrent->ShmNext ) )
+- {
+- pshmLast->ShmNext = pshmCurrent->ShmNext;
+- }
+- else
+- {
+- /* Only one left. */
+- pshmLast->ShmNext = 0;
+- }
+-
+- break;
+- }
+- else
+- {
+- pshmLast = pshmCurrent;
+- pshmCurrent = (PSHM_NAMED_OBJECTS)SHMPTR_TO_PTR( pshmCurrent->ShmNext );
+- }
+- }
+-
+- SHMRelease();
+- return;
+-}
+-
+-/*++ SHMAddNamedObject
+-
+-Adds the specified named object to the list.
+-
+-No return.
+---*/
+-void SHMAddNamedObject( SHMPTR shmNewNamedObject )
+-{
+- PSHM_NAMED_OBJECTS pshmNew = 0;
+-
+- pshmNew = (PSHM_NAMED_OBJECTS)SHMPTR_TO_PTR( shmNewNamedObject );
+-
+- if ( pshmNew == NULL )
+- {
+- ASSERT( "pshmNew should not be NULL\n" );
+- }
+-
+- SHMLock();
+-
+- pshmNew->ShmNext = SHMGetInfo( SIID_NAMED_OBJECTS );
+-
+- if ( !SHMSetInfo( SIID_NAMED_OBJECTS, shmNewNamedObject ) )
+- {
+- ASSERT( "Unable to add the mapping object to shared memory.\n" );
+- }
+-
+- SHMRelease();
+- return;
+-}
++}
+\ No newline at end of file
+diff --git a/src/pal/src/synchmgr/synchcontrollers.cpp b/src/pal/src/synchmgr/synchcontrollers.cpp
+index f7df5ea..68fe429 100644
+--- a/src/pal/src/synchmgr/synchcontrollers.cpp
++++ b/src/pal/src/synchmgr/synchcontrollers.cpp
+@@ -268,7 +268,7 @@ namespace CorUnix
+
+ PAL_ERROR palErr = NO_ERROR;
+ WaitingThreadsListNode * pwtlnNewNode = NULL;
+- SharedID shridNewNode = NULLSharedID;
++ SharedID shridNewNode = NULL;
+ ThreadWaitInfo * ptwiWaitInfo;
+ DWORD * pdwWaitState;
+ bool fSharedObject = (SharedObject == m_odObjectDomain);
+@@ -299,7 +299,7 @@ namespace CorUnix
+
+ if (!pwtlnNewNode)
+ {
+- if (fSharedObject && (NULLSharedID != shridNewNode))
++ if (fSharedObject && (NULL != shridNewNode))
+ {
+ ASSERT("Bad Shared Memory ptr %p\n", shridNewNode);
+ palErr = ERROR_INTERNAL_ERROR;
+@@ -335,7 +335,7 @@ namespace CorUnix
+ }
+ }
+
+- pwtlnNewNode->shridSHRThis = NULLSharedID;
++ pwtlnNewNode->shridSHRThis = NULL;
+ pwtlnNewNode->ptwiWaitInfo = ptwiWaitInfo;
+ pwtlnNewNode->dwObjIndex = dwIndex;
+ pwtlnNewNode->dwProcessId = gPID;
+@@ -442,7 +442,7 @@ namespace CorUnix
+ {
+ m_psdSynchData->Release(m_pthrOwner);
+ }
+- if ((fSharedObject) && (NULLSharedID != shridNewNode))
++ if ((fSharedObject) && (NULL != shridNewNode))
+ {
+ pSynchManager->CacheAddSharedWTListNode(m_pthrOwner, shridNewNode);
+ }
+@@ -781,7 +781,7 @@ namespace CorUnix
+ CPalSynchronizationManager::GetInstance();
+ bool fSharedObject = (SharedObject == m_odObjectDomain);
+
+- _ASSERT_MSG((fSharedObject && (NULLSharedID == m_ptrWTLHead.shrid)) ||
++ _ASSERT_MSG((fSharedObject && (NULL == m_ptrWTLHead.shrid)) ||
+ (!fSharedObject && (NULL == m_ptrWTLHead.ptr)),
+ "Final Release on CSynchData with threads still in "
+ "the waiting list\n");
+@@ -1082,7 +1082,7 @@ namespace CorUnix
+ bool fDelegatedSignaling = false;
+ DWORD * pdwWaitState;
+ DWORD dwObjIdx;
+- SharedID shridItem = NULLSharedID, shridNextItem = NULLSharedID;
++ SharedID shridItem = NULL, shridNextItem = NULL;
+ WaitingThreadsListNode * pwtlnItem, * pwtlnNextItem;
+ DWORD dwPid = gPID;
+ CPalSynchronizationManager * pSynchManager =
+@@ -1400,7 +1400,7 @@ namespace CorUnix
+ bool fSharedObject = (SharedObject == GetObjectDomain());
+ DWORD * pdwWaitState;
+ DWORD dwObjIdx;
+- SharedID shridItem = NULLSharedID, shridNextItem = NULLSharedID;
++ SharedID shridItem = NULL, shridNextItem = NULL;
+ WaitingThreadsListNode * pwtlnItem, * pwtlnNextItem;
+ DWORD dwPid = gPID;
+ CPalSynchronizationManager * pSynchManager =
+@@ -1893,14 +1893,14 @@ namespace CorUnix
+
+ VALIDATEOBJECT(pwtlnNewNode);
+
+- pwtlnNewNode->ptrNext.shrid = NULLSharedID;
++ pwtlnNewNode->ptrNext.shrid = NULL;
+ if (NULL == pwtlnCurrLast)
+ {
+- _ASSERT_MSG(NULLSharedID == m_ptrWTLHead.shrid,
++ _ASSERT_MSG(NULL == m_ptrWTLHead.shrid,
+ "Corrupted waiting list on shared CSynchData at "
+ "{shrid=%p, p=%p}\n", m_shridThis, this);
+
+- pwtlnNewNode->ptrPrev.shrid = NULLSharedID;
++ pwtlnNewNode->ptrPrev.shrid = NULL;
+ m_ptrWTLHead.shrid = shridNewNode;
+ m_ptrWTLTail.shrid = shridNewNode;
+ }
+diff --git a/src/pal/src/synchmgr/synchmanager.cpp b/src/pal/src/synchmgr/synchmanager.cpp
+index 73b5644..a683255 100644
+--- a/src/pal/src/synchmgr/synchmanager.cpp
++++ b/src/pal/src/synchmgr/synchmanager.cpp
+@@ -949,7 +949,7 @@ namespace CorUnix
+ if (SharedObject == odObjectDomain)
+ {
+ SharedID shridSynchData = m_cacheSHRSynchData.Get(pthrCurrent);
+- if (NULLSharedID == shridSynchData)
++ if (NULL == shridSynchData)
+ {
+ ERROR("Unable to allocate shared memory\n");
+ return ERROR_NOT_ENOUGH_MEMORY;
+@@ -962,8 +962,8 @@ namespace CorUnix
+ _ASSERT_MSG(NULL != psdSynchData, "Bad shared memory pointer\n");
+
+ // Initialize waiting list pointers
+- psdSynchData->SetWTLHeadShrPtr(NULLSharedID);
+- psdSynchData->SetWTLTailShrPtr(NULLSharedID);
++ psdSynchData->SetWTLHeadShrPtr(NULL);
++ psdSynchData->SetWTLTailShrPtr(NULL);
+
+ // Store shared pointer to this object
+ psdSynchData->SetSharedThis(shridSynchData);
+@@ -984,7 +984,7 @@ namespace CorUnix
+ psdSynchData->SetWTLTailPtr(NULL);
+
+ // Set shared this pointer to NULL
+- psdSynchData->SetSharedThis(NULLSharedID);
++ psdSynchData->SetSharedThis(NULL);
+
+ *ppvSynchData = static_cast<void *>(psdSynchData);
+ }
+@@ -2019,7 +2019,7 @@ namespace CorUnix
+ if (SynchWorkerCmdRemoteSignal == swcWorkerCmd ||
+ SynchWorkerCmdDelegatedObjectSignaling == swcWorkerCmd)
+ {
+- SharedID shridMarshaledId = NULLSharedID;
++ SharedID shridMarshaledId = NULL;
+
+ TRACE("Received %s cmd\n",
+ (swcWorkerCmd == SynchWorkerCmdRemoteSignal) ?
+@@ -2499,7 +2499,7 @@ namespace CorUnix
+ WaitingThreadsListNode * pWLNode = SharedIDToTypePointer(WaitingThreadsListNode, shridWLNode);
+
+ _ASSERT_MSG(gPID != pWLNode->dwProcessId, "WakeUpRemoteThread called on local thread\n");
+- _ASSERT_MSG(NULLSharedID != shridWLNode, "NULL shared identifier\n");
++ _ASSERT_MSG(NULL != shridWLNode, "NULL shared identifier\n");
+ _ASSERT_MSG(NULL != pWLNode, "Bad shared wait list node identifier (%p)\n", (VOID*)shridWLNode);
+ _ASSERT_MSG(MsgSize <= PIPE_BUF, "Message too long [MsgSize=%d PIPE_BUF=%d]\n", MsgSize, (int)PIPE_BUF);
+
+@@ -2556,7 +2556,7 @@ namespace CorUnix
+ SharedIDToTypePointer(CSynchData, shridSynchData);
+
+ _ASSERT_MSG(gPID != dwTargetProcessId, " called on local thread\n");
+- _ASSERT_MSG(NULLSharedID != shridSynchData, "NULL shared identifier\n");
++ _ASSERT_MSG(NULL != shridSynchData, "NULL shared identifier\n");
+ _ASSERT_MSG(NULL != psdSynchData, "Bad shared SynchData identifier (%p)\n", (VOID*)shridSynchData);
+ _ASSERT_MSG(MsgSize <= PIPE_BUF, "Message too long [MsgSize=%d PIPE_BUF=%d]\n", MsgSize, (int)PIPE_BUF);
+
+@@ -3737,7 +3737,7 @@ namespace CorUnix
+ PAL_ERROR palError = NO_ERROR;
+ CSynchData *psdLocal = reinterpret_cast<CSynchData *>(pvLocalSynchData);
+ CSynchData *psdShared = NULL;
+- SharedID shridSynchData = NULLSharedID;
++ SharedID shridSynchData = NULL;
+ SharedID *rgshridWTLNodes = NULL;
+ CObjectType *pot = NULL;
+ ULONG ulcWaitingThreads;
+@@ -3759,7 +3759,7 @@ namespace CorUnix
+ //
+
+ shridSynchData = m_cacheSHRSynchData.Get(pthrCurrent);
+- if (NULLSharedID == shridSynchData)
++ if (NULL == shridSynchData)
+ {
+ ERROR("Unable to allocate shared memory\n");
+ palError = ERROR_NOT_ENOUGH_MEMORY;
+@@ -3837,8 +3837,8 @@ namespace CorUnix
+ // for the waiting threads
+ //
+
+- psdShared->SetWTLHeadShrPtr(NULLSharedID);
+- psdShared->SetWTLTailShrPtr(NULLSharedID);
++ psdShared->SetWTLHeadShrPtr(NULL);
++ psdShared->SetWTLTailShrPtr(NULL);
+
+ if (0 < ulcWaitingThreads)
+ {
+@@ -4020,7 +4020,7 @@ namespace CorUnix
+
+ CThreadSynchronizationInfo::CThreadSynchronizationInfo() :
+ m_tsThreadState(TS_IDLE),
+- m_shridWaitAwakened(NULLSharedID),
++ m_shridWaitAwakened(NULL),
+ m_lLocalSynchLockCount(0),
+ m_lSharedSynchLockCount(0),
+ m_ownedNamedMutexListHead(nullptr)
+@@ -4037,9 +4037,9 @@ namespace CorUnix
+ CThreadSynchronizationInfo::~CThreadSynchronizationInfo()
+ {
+ DeleteCriticalSection(&m_ownedNamedMutexListLock);
+- if (NULLSharedID != m_shridWaitAwakened)
++ if (NULL != m_shridWaitAwakened)
+ {
+- RawSharedObjectFree(m_shridWaitAwakened);
++ free(m_shridWaitAwakened);
+ }
+ }
+
+@@ -4091,9 +4091,8 @@ namespace CorUnix
+ pthread_condattr_t attrs;
+ pthread_condattr_t *attrsPtr = nullptr;
+
+- m_shridWaitAwakened = RawSharedObjectAlloc(sizeof(DWORD),
+- DefaultSharedPool);
+- if (NULLSharedID == m_shridWaitAwakened)
++ m_shridWaitAwakened = malloc(sizeof(DWORD));
++ if (NULL == m_shridWaitAwakened)
+ {
+ ERROR("Fail allocating thread wait status shared object\n");
+ palErr = ERROR_NOT_ENOUGH_MEMORY;
+diff --git a/src/pal/src/synchmgr/synchmanager.hpp b/src/pal/src/synchmgr/synchmanager.hpp
+index 883d5b8..b0cc2e7 100644
+--- a/src/pal/src/synchmgr/synchmanager.hpp
++++ b/src/pal/src/synchmgr/synchmanager.hpp
+@@ -172,7 +172,7 @@ namespace CorUnix
+
+ public:
+ CSynchData()
+- : m_ulcWaitingThreads(0), m_shridThis(NULLSharedID), m_lRefCount(1),
++ : m_ulcWaitingThreads(0), m_shridThis(NULL), m_lRefCount(1),
+ m_lSignalCount(0), m_lOwnershipCount(0), m_dwOwnerPid(0),
+ m_dwOwnerTid(0), m_pOwnerThread(NULL),
+ m_poolnOwnedObjectListNode(NULL), m_fAbandoned(false)
+--
+2.7.4
+
diff --git a/packaging/0019-Remove-relocations-from-SECTION_Readonly-for-fields-.patch b/packaging/0019-Remove-relocations-from-SECTION_Readonly-for-fields-.patch
new file mode 100644
index 0000000000..2b4e53c176
--- /dev/null
+++ b/packaging/0019-Remove-relocations-from-SECTION_Readonly-for-fields-.patch
@@ -0,0 +1,903 @@
+From 9440822ceb7b6e58b840cfa4f4118e6410375df5 Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Wed, 21 Jun 2017 19:48:49 +0300
+Subject: [PATCH 19/32] Remove relocations from SECTION_Readonly for fields not
+ accessed from jit code on ARM
+
+---
+ src/debug/daccess/nidump.cpp | 6 ++--
+ src/inc/fixuppointer.h | 28 +++++++++++++---
+ src/vm/ceeload.cpp | 31 ++++++++++--------
+ src/vm/ceeload.h | 26 ++++++++++-----
+ src/vm/dataimage.h | 78 ++++++++++++++++++++++++++++++++++++++++++++
+ src/vm/methodtable.cpp | 54 +++++++++++++++++-------------
+ src/vm/methodtable.h | 25 +++++++++-----
+ src/vm/methodtable.inl | 58 +++++++++++++++++---------------
+ 8 files changed, 218 insertions(+), 88 deletions(-)
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index 2ec5d9a..673aa39 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -3959,7 +3959,7 @@ void NativeImageDumper::DumpModule( PTR_Module module )
+ DisplayWriteFieldInt( numElementsHot, ctorInfo->numElementsHot,
+ ModuleCtorInfo, SLIM_MODULE_TBLS );
+ DisplayWriteFieldAddress( ppMT, DPtrToPreferredAddr(ctorInfo->ppMT),
+- ctorInfo->numElements * sizeof(MethodTable*),
++ ctorInfo->numElements * sizeof(RelativePointer<MethodTable*>),
+ ModuleCtorInfo, SLIM_MODULE_TBLS );
+ /* REVISIT_TODO Tue 03/21/2006
+ * is cctorInfoHot and cctorInfoCold actually have anything interesting
+@@ -7301,7 +7301,7 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+ {
+ PTR_InterfaceInfo ifMap = mt->GetInterfaceMap();
+ m_display->StartArrayWithOffset( "InterfaceMap",
+- offsetof(MethodTable, m_pMultipurposeSlot2),
++ offsetof(MethodTable, m_pInterfaceMap),
+ sizeof(void*),
+ NULL );
+ for( unsigned i = 0; i < mt->GetNumInterfaces(); ++i )
+@@ -7335,7 +7335,7 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+ DPtrToPreferredAddr(genStatics),
+ sizeof(*genStatics) );
+
+- PTR_FieldDesc fieldDescs = genStatics->m_pFieldDescs;
++ PTR_FieldDesc fieldDescs = ReadPointerMaybeNull((GenericsStaticsInfo *) genStatics, &GenericsStaticsInfo::m_pFieldDescs);
+ if( fieldDescs == NULL )
+ {
+ DisplayWriteFieldPointer( m_pFieldDescs, NULL, GenericsStaticsInfo,
+diff --git a/src/inc/fixuppointer.h b/src/inc/fixuppointer.h
+index 83ff20e..20eb9d8 100644
+--- a/src/inc/fixuppointer.h
++++ b/src/inc/fixuppointer.h
+@@ -141,6 +141,12 @@ public:
+ LIMITED_METHOD_CONTRACT;
+ SetValueMaybeNull((TADDR)this, addr);
+ }
++
++ FORCEINLINE void SetValueVolatile(PTR_TYPE addr)
++ {
++ LIMITED_METHOD_CONTRACT;
++ SetValue(addr);
++ }
+ #endif
+
+ #ifndef DACCESS_COMPILE
+@@ -264,6 +270,9 @@ public:
+ RelativeFixupPointer<PTR_TYPE>& operator = (const RelativeFixupPointer<PTR_TYPE> &) =delete;
+ RelativeFixupPointer<PTR_TYPE>& operator = (RelativeFixupPointer<PTR_TYPE> &&) =delete;
+
++ // Default constructor
++ RelativeFixupPointer<PTR_TYPE>() {}
++
+ // Returns whether the encoded pointer is NULL.
+ BOOL IsNull() const
+ {
+@@ -468,7 +477,6 @@ public:
+ PTR_TYPE GetValue(TADDR base) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- PRECONDITION(!IsNull());
+ return dac_cast<PTR_TYPE>(m_ptr);
+ }
+
+@@ -480,6 +488,13 @@ public:
+ }
+
+ // Returns value of the encoded pointer. The pointer can be NULL.
++ PTR_TYPE GetValueMaybeNull() const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return dac_cast<PTR_TYPE>(m_ptr);
++ }
++
++ // Returns value of the encoded pointer. The pointer can be NULL.
+ PTR_TYPE GetValueMaybeNull(TADDR base) const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+@@ -493,6 +508,7 @@ public:
+ return dac_cast<DPTR(PlainPointer<PTR_TYPE>)>(base)->GetValueMaybeNull(base);
+ }
+
++#ifndef DACCESS_COMPILE
+ void SetValue(PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+@@ -503,7 +519,6 @@ public:
+ void SetValue(TADDR base, PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+- PRECONDITION(addr != NULL);
+ m_ptr = dac_cast<TADDR>(addr);
+ }
+
+@@ -521,7 +536,6 @@ public:
+ m_ptr = dac_cast<TADDR>(addr);
+ }
+
+-#ifndef DACCESS_COMPILE
+ // Set encoded value of the pointer. The value can be NULL.
+ // Does not need explicit base and thus can be used in non-DAC builds only.
+ FORCEINLINE void SetValueMaybeNull(PTR_TYPE addr)
+@@ -529,7 +543,6 @@ public:
+ LIMITED_METHOD_CONTRACT;
+ return SetValueMaybeNull((TADDR)this, addr);
+ }
+-#endif
+
+ // Static version of SetValueMaybeNull. It is meant to simplify access to arrays of pointers.
+ FORCEINLINE static void SetValueMaybeNullAtPtr(TADDR base, PTR_TYPE addr)
+@@ -538,6 +551,13 @@ public:
+ dac_cast<DPTR(PlainPointer<PTR_TYPE>)>(base)->SetValueMaybeNull(base, addr);
+ }
+
++ FORCEINLINE void SetValueVolatile(PTR_TYPE addr)
++ {
++ LIMITED_METHOD_CONTRACT;
++ VolatileStore((PTR_TYPE *)(&m_ptr), addr);
++ }
++#endif
++
+ private:
+ TADDR m_ptr;
+ };
+diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
+index cd40ad7..3a85a52 100644
+--- a/src/vm/ceeload.cpp
++++ b/src/vm/ceeload.cpp
+@@ -2892,7 +2892,7 @@ BOOL Module::IsPreV4Assembly()
+ }
+
+
+-ArrayDPTR(FixupPointer<PTR_MethodTable>) ModuleCtorInfo::GetGCStaticMTs(DWORD index)
++ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ModuleCtorInfo::GetGCStaticMTs(DWORD index)
+ {
+ LIMITED_METHOD_CONTRACT;
+
+@@ -8282,6 +8282,8 @@ void Module::SaveTypeHandle(DataImage * image,
+ #endif // _DEBUG
+ }
+
++#ifndef DACCESS_COMPILE
++
+ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ {
+ STANDARD_VM_CONTRACT;
+@@ -8303,7 +8305,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ // items numElementsHot...i-1 are cold
+ for (i = 0; i < numElements; i++)
+ {
+- MethodTable *ppMTTemp = ppMT[i];
++ MethodTable *ppMTTemp = ppMT[i].GetValue();
+
+ // Count the number of boxed statics along the way
+ totalBoxedStatics += ppMTTemp->GetNumBoxedRegularStatics();
+@@ -8317,8 +8319,8 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ if (hot)
+ {
+ // swap ppMT[i] and ppMT[numElementsHot] to maintain the loop invariant
+- ppMT[i] = ppMT[numElementsHot];
+- ppMT[numElementsHot] = ppMTTemp;
++ ppMT[i].SetValue(ppMT[numElementsHot].GetValue());
++ ppMT[numElementsHot].SetValue(ppMTTemp);
+
+ numElementsHot++;
+ }
+@@ -8343,11 +8345,11 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+
+ for (i = 0; i < numElementsHot; i++)
+ {
+- hashArray[i] = GenerateHash(ppMT[i], HOT);
++ hashArray[i] = GenerateHash(ppMT[i].GetValue(), HOT);
+ }
+ for (i = numElementsHot; i < numElements; i++)
+ {
+- hashArray[i] = GenerateHash(ppMT[i], COLD);
++ hashArray[i] = GenerateHash(ppMT[i].GetValue(), COLD);
+ }
+
+ // Sort the two arrays by hash values to create regions with the same hash values.
+@@ -8410,7 +8412,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ // make cctorInfoCold point to the first cold element
+ cctorInfoCold = cctorInfoHot + numElementsHot;
+
+- ppHotGCStaticsMTs = (totalBoxedStatics != 0) ? new FixupPointer<PTR_MethodTable>[totalBoxedStatics] : NULL;
++ ppHotGCStaticsMTs = (totalBoxedStatics != 0) ? new RelativeFixupPointer<PTR_MethodTable>[totalBoxedStatics] : NULL;
+ numHotGCStaticsMTs = totalBoxedStatics;
+
+ DWORD iGCStaticMT = 0;
+@@ -8426,7 +8428,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ ppColdGCStaticsMTs = ppHotGCStaticsMTs + numHotGCStaticsMTs;
+ }
+
+- MethodTable* pMT = ppMT[i];
++ MethodTable* pMT = ppMT[i].GetValue();
+ ClassCtorInfoEntry* pEntry = &cctorInfoHot[i];
+
+ WORD numBoxedStatics = pMT->GetNumBoxedRegularStatics();
+@@ -8456,7 +8458,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ == (iGCStaticMT - pEntry->firstBoxedStaticMTIndex) * sizeof(MethodTable*));
+
+ TypeHandle th = pField->GetFieldTypeHandleThrowing();
+- ppHotGCStaticsMTs[iGCStaticMT++].SetValue(th.GetMethodTable());
++ ppHotGCStaticsMTs[iGCStaticMT++].SetValueMaybeNull(th.GetMethodTable());
+
+ numFoundBoxedStatics++;
+ }
+@@ -8479,7 +8481,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+
+ if (numElements > 0)
+ image->StoreStructure(ppMT,
+- sizeof(MethodTable *) * numElements,
++ sizeof(RelativePointer<MethodTable *>) * numElements,
+ DataImage::ITEM_MODULE_CCTOR_INFO_HOT);
+
+ if (numElements > numElementsHot)
+@@ -8496,7 +8498,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ if ( numHotGCStaticsMTs )
+ {
+ // Save the mt templates
+- image->StoreStructure( ppHotGCStaticsMTs, numHotGCStaticsMTs * sizeof(MethodTable*),
++ image->StoreStructure( ppHotGCStaticsMTs, numHotGCStaticsMTs * sizeof(RelativeFixupPointer<MethodTable*>),
+ DataImage::ITEM_GC_STATIC_HANDLES_HOT);
+ }
+ else
+@@ -8507,7 +8509,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ if ( numColdGCStaticsMTs )
+ {
+ // Save the hot mt templates
+- image->StoreStructure( ppColdGCStaticsMTs, numColdGCStaticsMTs * sizeof(MethodTable*),
++ image->StoreStructure( ppColdGCStaticsMTs, numColdGCStaticsMTs * sizeof(RelativeFixupPointer<MethodTable*>),
+ DataImage::ITEM_GC_STATIC_HANDLES_COLD);
+ }
+ else
+@@ -8516,6 +8518,7 @@ void ModuleCtorInfo::Save(DataImage *image, CorProfileData *profileData)
+ }
+ }
+
++#endif // !DACCESS_COMPILE
+
+ bool Module::AreAllClassesFullyLoaded()
+ {
+@@ -9658,7 +9661,7 @@ void ModuleCtorInfo::Fixup(DataImage *image)
+
+ for (DWORD i=0; i<numElements; i++)
+ {
+- image->FixupPointerField(ppMT, i * sizeof(ppMT[0]));
++ image->FixupRelativePointerField(ppMT, i * sizeof(ppMT[0]));
+ }
+ }
+ else
+@@ -14064,7 +14067,7 @@ ModuleCtorInfo::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+
+ // This class is contained so do not enumerate 'this'.
+ DacEnumMemoryRegion(dac_cast<TADDR>(ppMT), numElements *
+- sizeof(TADDR));
++ sizeof(RelativePointer<MethodTable *>));
+ DacEnumMemoryRegion(dac_cast<TADDR>(cctorInfoHot), numElementsHot *
+ sizeof(ClassCtorInfoEntry));
+ DacEnumMemoryRegion(dac_cast<TADDR>(cctorInfoCold),
+diff --git a/src/vm/ceeload.h b/src/vm/ceeload.h
+index fa61089..e82f279 100644
+--- a/src/vm/ceeload.h
++++ b/src/vm/ceeload.h
+@@ -620,7 +620,7 @@ struct ModuleCtorInfo
+ DWORD numElements;
+ DWORD numLastAllocated;
+ DWORD numElementsHot;
+- DPTR(PTR_MethodTable) ppMT; // size is numElements
++ DPTR(RelativePointer<PTR_MethodTable>) ppMT; // size is numElements
+ PTR_ClassCtorInfoEntry cctorInfoHot; // size is numElementsHot
+ PTR_ClassCtorInfoEntry cctorInfoCold; // size is numElements-numElementsHot
+
+@@ -629,8 +629,8 @@ struct ModuleCtorInfo
+ DWORD numHotHashes;
+ DWORD numColdHashes;
+
+- ArrayDPTR(FixupPointer<PTR_MethodTable>) ppHotGCStaticsMTs; // hot table
+- ArrayDPTR(FixupPointer<PTR_MethodTable>) ppColdGCStaticsMTs; // cold table
++ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ppHotGCStaticsMTs; // hot table
++ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ppColdGCStaticsMTs; // cold table
+
+ DWORD numHotGCStaticsMTs;
+ DWORD numColdGCStaticsMTs;
+@@ -666,7 +666,13 @@ struct ModuleCtorInfo
+ return hashVal;
+ };
+
+- ArrayDPTR(FixupPointer<PTR_MethodTable>) GetGCStaticMTs(DWORD index);
++ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) GetGCStaticMTs(DWORD index);
++
++ PTR_MethodTable GetMT(DWORD i)
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return ppMT[i].GetValue(dac_cast<TADDR>(ppMT) + i * sizeof(RelativePointer<PTR_MethodTable>));
++ }
+
+ #ifdef FEATURE_PREJIT
+
+@@ -677,11 +683,11 @@ struct ModuleCtorInfo
+ class ClassCtorInfoEntryArraySort : public CQuickSort<DWORD>
+ {
+ private:
+- PTR_MethodTable *m_pBase1;
++ DPTR(RelativePointer<PTR_MethodTable>) m_pBase1;
+
+ public:
+ //Constructor
+- ClassCtorInfoEntryArraySort(DWORD *base, PTR_MethodTable *base1, int count)
++ ClassCtorInfoEntryArraySort(DWORD *base, DPTR(RelativePointer<PTR_MethodTable>) base1, int count)
+ : CQuickSort<DWORD>(base, count)
+ {
+ WRAPPER_NO_CONTRACT;
+@@ -702,6 +708,7 @@ struct ModuleCtorInfo
+ return 1;
+ }
+
++#ifndef DACCESS_COMPILE
+ // Swap is overwriten so that we can sort both the MethodTable pointer
+ // array and the ClassCtorInfoEntry array in parrallel.
+ FORCEINLINE void Swap(SSIZE_T iFirst, SSIZE_T iSecond)
+@@ -717,10 +724,11 @@ struct ModuleCtorInfo
+ m_pBase[iFirst] = m_pBase[iSecond];
+ m_pBase[iSecond] = sTemp;
+
+- sTemp1 = m_pBase1[iFirst];
+- m_pBase1[iFirst] = m_pBase1[iSecond];
+- m_pBase1[iSecond] = sTemp1;
++ sTemp1 = m_pBase1[iFirst].GetValueMaybeNull();
++ m_pBase1[iFirst].SetValueMaybeNull(m_pBase1[iSecond].GetValueMaybeNull());
++ m_pBase1[iSecond].SetValueMaybeNull(sTemp1);
+ }
++#endif // !DACCESS_COMPILE
+ };
+ #endif // FEATURE_PREJIT
+ };
+diff --git a/src/vm/dataimage.h b/src/vm/dataimage.h
+index 5d48a71..0167ec5 100644
+--- a/src/vm/dataimage.h
++++ b/src/vm/dataimage.h
+@@ -309,8 +309,58 @@ public:
+ void FixupPointerField(PVOID p, SSIZE_T offset);
+ void FixupRelativePointerField(PVOID p, SSIZE_T offset);
+
++ template<typename T, typename PT>
++ void FixupPlainOrRelativePointerField(const T *base, const RelativePointer<PT> T::* pPointerFieldMember)
++ {
++ STANDARD_VM_CONTRACT;
++ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
++ FixupRelativePointerField((PVOID)base, offset);
++ }
++
++ template<typename T, typename C, typename PT>
++ void FixupPlainOrRelativePointerField(const T *base, const C T::* pFirstPointerFieldMember, const RelativePointer<PT> C::* pSecondPointerFieldMember)
++ {
++ STANDARD_VM_CONTRACT;
++ const RelativePointer<PT> *ptr = &(base->*pFirstPointerFieldMember.*pSecondPointerFieldMember);
++ SSIZE_T offset = (SSIZE_T) ptr - (SSIZE_T) base;
++ FixupRelativePointerField((PVOID)base, offset);
++ }
++
++ template<typename T, typename PT>
++ void FixupPlainOrRelativePointerField(const T *base, const PlainPointer<PT> T::* pPointerFieldMember)
++ {
++ STANDARD_VM_CONTRACT;
++ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
++ FixupPointerField((PVOID)base, offset);
++ }
++
++ template<typename T, typename C, typename PT>
++ void FixupPlainOrRelativePointerField(const T *base, const C T::* pFirstPointerFieldMember, const PlainPointer<PT> C::* pSecondPointerFieldMember)
++ {
++ STANDARD_VM_CONTRACT;
++ const PlainPointer<PT> *ptr = &(base->*pFirstPointerFieldMember.*pSecondPointerFieldMember);
++ SSIZE_T offset = (SSIZE_T) ptr - (SSIZE_T) base;
++ FixupPointerField((PVOID)base, offset);
++ }
++
+ void FixupField(PVOID p, SSIZE_T offset, PVOID pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR);
+
++ template<typename T, typename PT>
++ void FixupPlainOrRelativeField(const T *base, const RelativePointer<PT> T::* pPointerFieldMember, PVOID pTarget, SSIZE_T targetOffset = 0)
++ {
++ STANDARD_VM_CONTRACT;
++ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
++ FixupField((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_RELPTR);
++ }
++
++ template<typename T, typename PT>
++ void FixupPlainOrRelativeField(const T *base, const PlainPointer<PT> T::* pPointerFieldMember, PVOID pTarget, SSIZE_T targetOffset = 0)
++ {
++ STANDARD_VM_CONTRACT;
++ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
++ FixupField((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_PTR);
++ }
++
+ void FixupFieldToNode(PVOID p, SSIZE_T offset, ZapNode * pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR);
+
+ void FixupFieldToNode(PVOID p, SSIZE_T offset, ZapStoredStructure * pTarget, SSIZE_T targetOffset = 0, ZapRelocationType type = IMAGE_REL_BASED_PTR)
+@@ -318,6 +368,34 @@ public:
+ return FixupFieldToNode(p, offset, (ZapNode *)pTarget, targetOffset, type);
+ }
+
++ template<typename T, typename PT>
++ void FixupPlainOrRelativeFieldToNode(const T *base, const RelativePointer<PT> T::* pPointerFieldMember, ZapNode * pTarget, SSIZE_T targetOffset = 0)
++ {
++ STANDARD_VM_CONTRACT;
++ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
++ FixupFieldToNode((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_RELPTR);
++ }
++
++ template<typename T, typename PT>
++ void FixupPlainOrRelativeFieldToNode(const T *base, const RelativePointer<PT> T::* pPointerFieldMember, ZapStoredStructure * pTarget, SSIZE_T targetOffset = 0)
++ {
++ return FixupPlainOrRelativeFieldToNode(base, pPointerFieldMember, (ZapNode *)pTarget, targetOffset);
++ }
++
++ template<typename T, typename PT>
++ void FixupPlainOrRelativeFieldToNode(const T *base, const PlainPointer<PT> T::* pPointerFieldMember, ZapNode * pTarget, SSIZE_T targetOffset = 0)
++ {
++ STANDARD_VM_CONTRACT;
++ SSIZE_T offset = (SSIZE_T) &(base->*pPointerFieldMember) - (SSIZE_T) base;
++ FixupFieldToNode((PVOID)base, offset, pTarget, targetOffset, IMAGE_REL_BASED_PTR);
++ }
++
++ template<typename T, typename PT>
++ void FixupPlainOrRelativeFieldToNode(const T *base, const PlainPointer<PT> T::* pPointerFieldMember, ZapStoredStructure * pTarget, SSIZE_T targetOffset = 0)
++ {
++ return FixupPlainOrRelativeFieldToNode(base, pPointerFieldMember, (ZapNode *)pTarget, targetOffset);
++ }
++
+ BOOL IsStored(const void *data)
+ { WRAPPER_NO_CONTRACT; return m_structures.LookupPtr(data) != NULL; }
+
+diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
+index 598759a..e219bb7 100644
+--- a/src/vm/methodtable.cpp
++++ b/src/vm/methodtable.cpp
+@@ -1014,7 +1014,7 @@ void MethodTable::SetInterfaceMap(WORD wNumInterfaces, InterfaceInfo_t* iMap)
+ m_wNumInterfaces = wNumInterfaces;
+
+ CONSISTENCY_CHECK(IS_ALIGNED(iMap, sizeof(void*)));
+- m_pInterfaceMap = iMap;
++ m_pInterfaceMap.SetValue(iMap);
+ }
+
+ //==========================================================================================
+@@ -1247,7 +1247,8 @@ void MethodTable::AddDynamicInterface(MethodTable *pItfMT)
+ *(((DWORD_PTR *)pNewItfMap) - 1) = NumDynAddedInterfaces + 1;
+
+ // Switch the old interface map with the new one.
+- VolatileStore(EnsureWritablePages(&m_pInterfaceMap), pNewItfMap);
++ EnsureWritablePages(&m_pInterfaceMap);
++ m_pInterfaceMap.SetValueVolatile(pNewItfMap);
+
+ // Log the fact that we leaked the interface vtable map.
+ #ifdef _DEBUG
+@@ -1288,7 +1289,7 @@ void MethodTable::SetupGenericsStaticsInfo(FieldDesc* pStaticFieldDescs)
+ pInfo->m_DynamicTypeID = (SIZE_T)-1;
+ }
+
+- pInfo->m_pFieldDescs = pStaticFieldDescs;
++ pInfo->m_pFieldDescs.SetValueMaybeNull(pStaticFieldDescs);
+ }
+
+ #endif // !DACCESS_COMPILE
+@@ -3148,7 +3149,7 @@ void MethodTable::AllocateRegularStaticBoxes()
+ OBJECTREF* pStaticSlots = (OBJECTREF*)(pStaticBase + pClassCtorInfoEntry->firstBoxedStaticOffset);
+ GCPROTECT_BEGININTERIOR(pStaticSlots);
+
+- ArrayDPTR(FixupPointer<PTR_MethodTable>) ppMTs = GetLoaderModule()->GetZapModuleCtorInfo()->
++ ArrayDPTR(RelativeFixupPointer<PTR_MethodTable>) ppMTs = GetLoaderModule()->GetZapModuleCtorInfo()->
+ GetGCStaticMTs(pClassCtorInfoEntry->firstBoxedStaticMTIndex);
+
+ DWORD numBoxedStatics = pClassCtorInfoEntry->numBoxedStatics;
+@@ -4123,7 +4124,7 @@ void ModuleCtorInfo::AddElement(MethodTable *pMethodTable)
+ {
+ _ASSERTE(numElements == numLastAllocated);
+
+- MethodTable ** ppOldMTEntries = ppMT;
++ RelativePointer<MethodTable *> *ppOldMTEntries = ppMT;
+
+ #ifdef _PREFAST_
+ #pragma warning(push)
+@@ -4134,12 +4135,19 @@ void ModuleCtorInfo::AddElement(MethodTable *pMethodTable)
+ #pragma warning(pop)
+ #endif // _PREFAST_
+
+- ppMT = new MethodTable* [numNewAllocated];
++ ppMT = new RelativePointer<MethodTable *> [numNewAllocated];
+
+ _ASSERTE(ppMT);
+
+- memcpy(ppMT, ppOldMTEntries, sizeof(MethodTable *) * numLastAllocated);
+- memset(ppMT + numLastAllocated, 0, sizeof(MethodTable *) * (numNewAllocated - numLastAllocated));
++ for (unsigned index = 0; index < numLastAllocated; ++index)
++ {
++ ppMT[index].SetValueMaybeNull(ppOldMTEntries[index].GetValueMaybeNull());
++ }
++
++ for (unsigned index = numLastAllocated; index < numNewAllocated; ++index)
++ {
++ ppMT[index].SetValueMaybeNull(NULL);
++ }
+
+ delete[] ppOldMTEntries;
+
+@@ -4151,7 +4159,7 @@ void ModuleCtorInfo::AddElement(MethodTable *pMethodTable)
+ // Note the use of two "parallel" arrays. We do this to keep the workingset smaller since we
+ // often search (in GetClassCtorInfoIfExists) for a methodtable pointer but never actually find it.
+
+- ppMT[numElements] = pMethodTable;
++ ppMT[numElements].SetValue(pMethodTable);
+ numElements++;
+ }
+
+@@ -4691,7 +4699,7 @@ void MethodTable::Fixup(DataImage *image)
+ if (IsCanonicalMethodTable())
+ {
+ // Pointer to EEClass
+- image->FixupPointerField(this, offsetof(MethodTable, m_pEEClass));
++ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pEEClass);
+ }
+ else
+ {
+@@ -4706,7 +4714,7 @@ void MethodTable::Fixup(DataImage *image)
+ if (image->CanHardBindToZapModule(pCanonMT->GetLoaderModule()))
+ {
+ // Pointer to canonical methodtable
+- image->FixupField(this, offsetof(MethodTable, m_pCanonMT), pCanonMT, UNION_METHODTABLE);
++ image->FixupPlainOrRelativeField(this, &MethodTable::m_pCanonMT, pCanonMT, UNION_METHODTABLE);
+ }
+ else
+ {
+@@ -4724,11 +4732,11 @@ void MethodTable::Fixup(DataImage *image)
+
+ if (pImport != NULL)
+ {
+- image->FixupFieldToNode(this, offsetof(MethodTable, m_pCanonMT), pImport, UNION_INDIRECTION);
++ image->FixupPlainOrRelativeFieldToNode(this, &MethodTable::m_pCanonMT, pImport, UNION_INDIRECTION);
+ }
+ }
+
+- image->FixupField(this, offsetof(MethodTable, m_pLoaderModule), pZapModule);
++ image->FixupField(this, offsetof(MethodTable, m_pLoaderModule), pZapModule, 0, IMAGE_REL_BASED_RELPTR);
+
+ #ifdef _DEBUG
+ image->FixupPointerField(this, offsetof(MethodTable, debug_m_szClassName));
+@@ -4797,7 +4805,7 @@ void MethodTable::Fixup(DataImage *image)
+
+ if (HasInterfaceMap())
+ {
+- image->FixupPointerField(this, offsetof(MethodTable, m_pMultipurposeSlot2));
++ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pInterfaceMap);
+
+ FixupExtraInterfaceInfo(image);
+ }
+@@ -5033,7 +5041,7 @@ void MethodTable::Fixup(DataImage *image)
+ {
+ GenericsStaticsInfo *pInfo = GetGenericsStaticsInfo();
+
+- image->FixupPointerField(this, (BYTE *)&pInfo->m_pFieldDescs - (BYTE *)this);
++ image->FixupRelativePointerField(this, (BYTE *)&pInfo->m_pFieldDescs - (BYTE *)this);
+ if (!isCanonical)
+ {
+ for (DWORD i = 0; i < GetClass()->GetNumStaticFields(); i++)
+@@ -5950,9 +5958,9 @@ void MethodTable::DoRestoreTypeKey()
+
+ // If we have an indirection cell then restore the m_pCanonMT and its module pointer
+ //
+- if (union_getLowBits(m_pCanonMT) == UNION_INDIRECTION)
++ if (union_getLowBits(m_pCanonMT.GetValue()) == UNION_INDIRECTION)
+ {
+- Module::RestoreMethodTablePointerRaw((MethodTable **)(union_getPointer(m_pCanonMT)),
++ Module::RestoreMethodTablePointerRaw((MethodTable **)(union_getPointer(m_pCanonMT.GetValue())),
+ GetLoaderModule(), CLASS_LOAD_UNRESTORED);
+ }
+
+@@ -7593,7 +7601,7 @@ BOOL MethodTable::SanityCheck()
+ // strings have component size2, all other non-arrays should have 0
+ _ASSERTE((GetComponentSize() <= 2) || IsArray());
+
+- if (m_pEEClass == NULL)
++ if (m_pEEClass.IsNull())
+ {
+ if (IsAsyncPinType())
+ {
+@@ -7733,7 +7741,7 @@ ClassCtorInfoEntry* MethodTable::GetClassCtorInfoIfExists()
+ if (HasBoxedRegularStatics())
+ {
+ ModuleCtorInfo *pModuleCtorInfo = GetZapModule()->GetZapModuleCtorInfo();
+- DPTR(PTR_MethodTable) ppMT = pModuleCtorInfo->ppMT;
++ DPTR(RelativePointer<PTR_MethodTable>) ppMT = pModuleCtorInfo->ppMT;
+ PTR_DWORD hotHashOffsets = pModuleCtorInfo->hotHashOffsets;
+ PTR_DWORD coldHashOffsets = pModuleCtorInfo->coldHashOffsets;
+
+@@ -7744,8 +7752,8 @@ ClassCtorInfoEntry* MethodTable::GetClassCtorInfoIfExists()
+
+ for (DWORD i = hotHashOffsets[hash]; i != hotHashOffsets[hash + 1]; i++)
+ {
+- _ASSERTE(ppMT[i]);
+- if (dac_cast<TADDR>(ppMT[i]) == dac_cast<TADDR>(this))
++ _ASSERTE(!ppMT[i].IsNull());
++ if (dac_cast<TADDR>(pModuleCtorInfo->GetMT(i)) == dac_cast<TADDR>(this))
+ {
+ return pModuleCtorInfo->cctorInfoHot + i;
+ }
+@@ -7759,8 +7767,8 @@ ClassCtorInfoEntry* MethodTable::GetClassCtorInfoIfExists()
+
+ for (DWORD i = coldHashOffsets[hash]; i != coldHashOffsets[hash + 1]; i++)
+ {
+- _ASSERTE(ppMT[i]);
+- if (dac_cast<TADDR>(ppMT[i]) == dac_cast<TADDR>(this))
++ _ASSERTE(!ppMT[i].IsNull());
++ if (dac_cast<TADDR>(pModuleCtorInfo->GetMT(i)) == dac_cast<TADDR>(this))
+ {
+ return pModuleCtorInfo->cctorInfoCold + (i - pModuleCtorInfo->numElementsHot);
+ }
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index 93a9ae2..f8b34ae 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -248,7 +248,7 @@ typedef DPTR(GenericsDictInfo) PTR_GenericsDictInfo;
+ struct GenericsStaticsInfo
+ {
+ // Pointer to field descs for statics
+- PTR_FieldDesc m_pFieldDescs;
++ RelativePointer<PTR_FieldDesc> m_pFieldDescs;
+
+ // Method table ID for statics
+ SIZE_T m_DynamicTypeID;
+@@ -2210,12 +2210,12 @@ public:
+ inline void SetClass(EEClass *pClass)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pEEClass = pClass;
++ m_pEEClass.SetValue(pClass);
+ }
+
+ inline void SetCanonicalMethodTable(MethodTable * pMT)
+ {
+- m_pCanonMT = (TADDR)pMT | MethodTable::UNION_METHODTABLE;
++ m_pCanonMT.SetValue((TADDR)pMT | MethodTable::UNION_METHODTABLE);
+ }
+ #endif
+
+@@ -2640,7 +2640,7 @@ public:
+ {
+ WRAPPER_NO_CONTRACT;
+ _ASSERTE(HasGenericsStaticsInfo());
+- return GetGenericsStaticsInfo()->m_pFieldDescs;
++ return ReadPointerMaybeNull((GenericsStaticsInfo *)GetGenericsStaticsInfo(), &GenericsStaticsInfo::m_pFieldDescs);
+ }
+
+ BOOL HasCrossModuleGenericStaticsInfo()
+@@ -4059,7 +4059,7 @@ private:
+ // for enum_flag_HasIndirectParentMethodTable.
+ TADDR m_pParentMethodTable;
+
+- PTR_Module m_pLoaderModule; // LoaderModule. It is equal to the ZapModule in ngened images
++ RelativePointer<PTR_Module> m_pLoaderModule; // LoaderModule. It is equal to the ZapModule in ngened images
+
+ PTR_MethodTableWriteableData m_pWriteableData;
+
+@@ -4073,8 +4073,13 @@ private:
+ static const TADDR UNION_MASK = 3;
+
+ union {
+- EEClass * m_pEEClass;
+- TADDR m_pCanonMT;
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ RelativePointer<DPTR(EEClass)> m_pEEClass;
++ RelativePointer<TADDR> m_pCanonMT;
++#else
++ PlainPointer<DPTR(EEClass)> m_pEEClass;
++ PlainPointer<TADDR> m_pCanonMT;
++#endif
+ };
+
+ __forceinline static LowBits union_getLowBits(TADDR pCanonMT)
+@@ -4103,7 +4108,11 @@ private:
+ public:
+ union
+ {
+- InterfaceInfo_t * m_pInterfaceMap;
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ RelativePointer<PTR_InterfaceInfo> m_pInterfaceMap;
++#else
++ PlainPointer<PTR_InterfaceInfo> m_pInterfaceMap;
++#endif
+ TADDR m_pMultipurposeSlot2;
+ };
+
+diff --git a/src/vm/methodtable.inl b/src/vm/methodtable.inl
+index 9b72d24..eb1abb0 100644
+--- a/src/vm/methodtable.inl
++++ b/src/vm/methodtable.inl
+@@ -23,24 +23,26 @@ inline PTR_EEClass MethodTable::GetClass_NoLogging()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
++ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
++
+ #ifdef _DEBUG
+- LowBits lowBits = union_getLowBits(m_pCanonMT);
++ LowBits lowBits = union_getLowBits(addr);
+ if (lowBits == UNION_EECLASS)
+ {
+- return PTR_EEClass(m_pCanonMT);
++ return PTR_EEClass(addr);
+ }
+ else if (lowBits == UNION_METHODTABLE)
+ {
+ // pointer to canonical MethodTable.
+- TADDR canonicalMethodTable = union_getPointer(m_pCanonMT);
+- return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
++ TADDR canonicalMethodTable = union_getPointer(addr);
++ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(canonicalMethodTable), &MethodTable::m_pCanonMT));
+ }
+ #ifdef FEATURE_PREJIT
+ else if (lowBits == UNION_INDIRECTION)
+ {
+ // pointer to indirection cell that points to canonical MethodTable
+- TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(m_pCanonMT));
+- return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
++ TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(addr));
++ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(canonicalMethodTable), &MethodTable::m_pCanonMT));
+ }
+ #endif
+ #ifdef DACCESS_COMPILE
+@@ -52,8 +54,6 @@ inline PTR_EEClass MethodTable::GetClass_NoLogging()
+
+ #else
+
+- TADDR addr = m_pCanonMT;
+-
+ if ((addr & 2) == 0)
+ {
+ // pointer to EEClass
+@@ -65,12 +65,12 @@ inline PTR_EEClass MethodTable::GetClass_NoLogging()
+ {
+ // pointer to indirection cell that points to canonical MethodTable
+ TADDR canonicalMethodTable = *PTR_TADDR(addr - 3);
+- return PTR_EEClass(PTR_MethodTable(canonicalMethodTable)->m_pCanonMT);
++ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(canonicalMethodTable), &MethodTable::m_pCanonMT));
+ }
+ #endif
+
+ // pointer to canonical MethodTable.
+- return PTR_EEClass(PTR_MethodTable(addr - 2)->m_pCanonMT);
++ return PTR_EEClass(ReadPointer((MethodTable *) PTR_MethodTable(addr - 2), &MethodTable::m_pCanonMT));
+ #endif
+ }
+
+@@ -113,25 +113,27 @@ inline BOOL MethodTable::IsClassPointerValid()
+ WRAPPER_NO_CONTRACT;
+ SUPPORTS_DAC;
+
+- LowBits lowBits = union_getLowBits(m_pCanonMT);
++ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
++
++ LowBits lowBits = union_getLowBits(addr);
+ if (lowBits == UNION_EECLASS)
+ {
+- return (m_pEEClass != NULL);
++ return !m_pEEClass.IsNull();
+ }
+ else if (lowBits == UNION_METHODTABLE)
+ {
+ // pointer to canonical MethodTable.
+- TADDR canonicalMethodTable = union_getPointer(m_pCanonMT);
+- return (PTR_MethodTable(canonicalMethodTable)->m_pEEClass != NULL);
++ TADDR canonicalMethodTable = union_getPointer(addr);
++ return !PTR_MethodTable(canonicalMethodTable)->m_pEEClass.IsNull();
+ }
+ #ifdef FEATURE_PREJIT
+ else if (lowBits == UNION_INDIRECTION)
+ {
+ // pointer to indirection cell that points to canonical MethodTable
+- TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(m_pCanonMT));
++ TADDR canonicalMethodTable = *PTR_TADDR(union_getPointer(addr));
+ if (CORCOMPILE_IS_POINTER_TAGGED(canonicalMethodTable))
+ return FALSE;
+- return (PTR_MethodTable(canonicalMethodTable)->m_pEEClass != NULL);
++ return !PTR_MethodTable(canonicalMethodTable)->m_pEEClass.IsNull();
+ }
+ #endif
+ _ASSERTE(!"Malformed m_pEEClass in MethodTable");
+@@ -161,7 +163,7 @@ inline PTR_Module MethodTable::GetZapModule()
+ PTR_Module zapModule = NULL;
+ if (IsZapped())
+ {
+- zapModule = m_pLoaderModule;
++ zapModule = ReadPointer(this, &MethodTable::m_pLoaderModule);
+ }
+
+ return zapModule;
+@@ -171,7 +173,7 @@ inline PTR_Module MethodTable::GetZapModule()
+ inline PTR_Module MethodTable::GetLoaderModule()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return m_pLoaderModule;
++ return ReadPointer(this, &MethodTable::m_pLoaderModule);
+ }
+
+ inline PTR_LoaderAllocator MethodTable::GetLoaderAllocator()
+@@ -187,7 +189,7 @@ inline PTR_LoaderAllocator MethodTable::GetLoaderAllocator()
+ inline void MethodTable::SetLoaderModule(Module* pModule)
+ {
+ WRAPPER_NO_CONTRACT;
+- m_pLoaderModule = pModule;
++ m_pLoaderModule.SetValue(pModule);
+ }
+
+ inline void MethodTable::SetLoaderAllocator(LoaderAllocator* pAllocator)
+@@ -1145,8 +1147,10 @@ inline PTR_MethodTable MethodTable::GetCanonicalMethodTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
++ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
++
+ #ifdef _DEBUG
+- LowBits lowBits = union_getLowBits(m_pCanonMT);
++ LowBits lowBits = union_getLowBits(addr);
+ if (lowBits == UNION_EECLASS)
+ {
+ return dac_cast<PTR_MethodTable>(this);
+@@ -1154,18 +1158,17 @@ inline PTR_MethodTable MethodTable::GetCanonicalMethodTable()
+ else if (lowBits == UNION_METHODTABLE)
+ {
+ // pointer to canonical MethodTable.
+- return PTR_MethodTable(union_getPointer(m_pCanonMT));
++ return PTR_MethodTable(union_getPointer(addr));
+ }
+ #ifdef FEATURE_PREJIT
+ else if (lowBits == UNION_INDIRECTION)
+ {
+- return PTR_MethodTable(*PTR_TADDR(union_getPointer(m_pCanonMT)));
++ return PTR_MethodTable(*PTR_TADDR(union_getPointer(addr)));
+ }
+ #endif
+ _ASSERTE(!"Malformed m_pCanonMT in MethodTable");
+ return NULL;
+ #else
+- TADDR addr = m_pCanonMT;
+
+ if ((addr & 2) == 0)
+ return dac_cast<PTR_MethodTable>(this);
+@@ -1185,11 +1188,12 @@ inline TADDR MethodTable::GetCanonicalMethodTableFixup()
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ #ifdef FEATURE_PREJIT
+- LowBits lowBits = union_getLowBits(m_pCanonMT);
++ TADDR addr = ReadPointer(this, &MethodTable::m_pCanonMT);
++ LowBits lowBits = union_getLowBits(addr);
+ if (lowBits == UNION_INDIRECTION)
+ {
+ // pointer to canonical MethodTable.
+- return *PTR_TADDR(union_getPointer(m_pCanonMT));
++ return *PTR_TADDR(union_getPointer(addr));
+ }
+ else
+ #endif
+@@ -1304,7 +1308,7 @@ inline BOOL MethodTable::IsCanonicalMethodTable()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+- return (union_getLowBits(m_pCanonMT) == UNION_EECLASS);
++ return (union_getLowBits(ReadPointer(this, &MethodTable::m_pCanonMT)) == UNION_EECLASS);
+ }
+
+ //==========================================================================================
+@@ -1338,7 +1342,7 @@ inline PTR_InterfaceInfo MethodTable::GetInterfaceMap()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+- return dac_cast<PTR_InterfaceInfo>(m_pMultipurposeSlot2); // m_pInterfaceMap
++ return ReadPointer(this, &MethodTable::m_pInterfaceMap);
+ }
+
+ //==========================================================================================
+--
+2.7.4
+
diff --git a/packaging/0020-Add-FixupPlainOrRelativePointerField-for-MethodDesc-.patch b/packaging/0020-Add-FixupPlainOrRelativePointerField-for-MethodDesc-.patch
new file mode 100644
index 0000000000..91b0639ea9
--- /dev/null
+++ b/packaging/0020-Add-FixupPlainOrRelativePointerField-for-MethodDesc-.patch
@@ -0,0 +1,49 @@
+From 0d08b9471178521765ea41dff5f95ea49f5e10c5 Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Wed, 21 Jun 2017 20:53:41 +0300
+Subject: [PATCH 20/32] Add FixupPlainOrRelativePointerField for MethodDesc
+ fields
+
+---
+ src/vm/method.cpp | 18 ++----------------
+ 1 file changed, 2 insertions(+), 16 deletions(-)
+
+diff --git a/src/vm/method.cpp b/src/vm/method.cpp
+index 1407264..6bd49fb 100644
+--- a/src/vm/method.cpp
++++ b/src/vm/method.cpp
+@@ -3651,14 +3651,7 @@ MethodDesc::Fixup(
+ }
+ }
+
+- if (decltype(InstantiatedMethodDesc::m_pPerInstInfo)::isRelative)
+- {
+- image->FixupRelativePointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
+- }
+- else
+- {
+- image->FixupPointerField(this, offsetof(InstantiatedMethodDesc, m_pPerInstInfo));
+- }
++ image->FixupPlainOrRelativePointerField((InstantiatedMethodDesc*) this, &InstantiatedMethodDesc::m_pPerInstInfo);
+
+ // Generic methods are dealt with specially to avoid encoding the formal method type parameters
+ if (IsTypicalMethodDefinition())
+@@ -3737,14 +3730,7 @@ MethodDesc::Fixup(
+
+ NDirectMethodDesc *pNMD = (NDirectMethodDesc *)this;
+
+- if (decltype(NDirectMethodDesc::ndirect.m_pWriteableData)::isRelative)
+- {
+- image->FixupRelativePointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
+- }
+- else
+- {
+- image->FixupPointerField(this, offsetof(NDirectMethodDesc, ndirect.m_pWriteableData));
+- }
++ image->FixupPlainOrRelativePointerField(pNMD, &NDirectMethodDesc::ndirect, &decltype(NDirectMethodDesc::ndirect)::m_pWriteableData);
+
+ NDirectWriteableData *pWriteableData = pNMD->GetWriteableData();
+ NDirectImportThunkGlue *pImportThunkGlue = pNMD->GetNDirectImportThunkGlue();
+--
+2.7.4
+
diff --git a/packaging/0021-Additional-fixes-for-RelativePointer-FixupPointer-Re.patch b/packaging/0021-Additional-fixes-for-RelativePointer-FixupPointer-Re.patch
new file mode 100644
index 0000000000..d3599f03e7
--- /dev/null
+++ b/packaging/0021-Additional-fixes-for-RelativePointer-FixupPointer-Re.patch
@@ -0,0 +1,332 @@
+From 7f7ddcef2af73ce4993120778d9333da9620d81c Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Thu, 22 Jun 2017 19:28:32 +0300
+Subject: [PATCH 21/32] Additional fixes for RelativePointer, FixupPointer,
+ RelativeFixupPointer, PlainPointer
+
+---
+ src/inc/fixuppointer.h | 155 ++++++++++++++++++++++++++++++++++++++++++-------
+ src/vm/ceeload.cpp | 12 ++--
+ src/vm/debughelp.cpp | 2 +-
+ 3 files changed, 142 insertions(+), 27 deletions(-)
+
+diff --git a/src/inc/fixuppointer.h b/src/inc/fixuppointer.h
+index 20eb9d8..a711418 100644
+--- a/src/inc/fixuppointer.h
++++ b/src/inc/fixuppointer.h
+@@ -214,15 +214,38 @@ public:
+ return dac_cast<PTR_TYPE>(addr);
+ }
+
++ // Returns value of the encoded pointer.
++ FORCEINLINE PTR_TYPE GetValueMaybeNull() const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return GetValue();
++ }
++
++#ifndef DACCESS_COMPILE
+ // Returns the pointer to the indirection cell.
+ PTR_TYPE * GetValuePtr() const
+ {
+- LIMITED_METHOD_DAC_CONTRACT;
++ LIMITED_METHOD_CONTRACT;
+ TADDR addr = m_addr;
+ if ((addr & FIXUP_POINTER_INDIRECTION) != 0)
+- return dac_cast<DPTR(PTR_TYPE)>(addr - FIXUP_POINTER_INDIRECTION);
++ return (PTR_TYPE *)(addr - FIXUP_POINTER_INDIRECTION);
+ return (PTR_TYPE *)&m_addr;
+ }
++#endif // !DACCESS_COMPILE
++
++ // Static version of GetValue. It is meant to simplify access to arrays of pointers.
++ FORCEINLINE static PTR_TYPE GetValueAtPtr(TADDR base)
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return dac_cast<DPTR(FixupPointer<PTR_TYPE>)>(base)->GetValue();
++ }
++
++ // Static version of GetValueMaybeNull. It is meant to simplify access to arrays of pointers.
++ FORCEINLINE static PTR_TYPE GetValueMaybeNullAtPtr(TADDR base)
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return dac_cast<DPTR(FixupPointer<PTR_TYPE>)>(base)->GetValueMaybeNull();
++ }
+
+ // Returns value of the encoded pointer.
+ // Allows the value to be tagged.
+@@ -235,12 +258,20 @@ public:
+ return addr;
+ }
+
++#ifndef DACCESS_COMPILE
+ void SetValue(PTR_TYPE addr)
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_addr = dac_cast<TADDR>(addr);
+ }
+
++ void SetValueMaybeNull(PTR_TYPE addr)
++ {
++ LIMITED_METHOD_CONTRACT;
++ SetValue(addr);
++ }
++#endif // !DACCESS_COMPILE
++
+ private:
+ TADDR m_addr;
+ };
+@@ -270,9 +301,6 @@ public:
+ RelativeFixupPointer<PTR_TYPE>& operator = (const RelativeFixupPointer<PTR_TYPE> &) =delete;
+ RelativeFixupPointer<PTR_TYPE>& operator = (RelativeFixupPointer<PTR_TYPE> &&) =delete;
+
+- // Default constructor
+- RelativeFixupPointer<PTR_TYPE>() {}
+-
+ // Returns whether the encoded pointer is NULL.
+ BOOL IsNull() const
+ {
+@@ -292,11 +320,12 @@ public:
+ }
+
+ #ifndef DACCESS_COMPILE
++ // Returns whether the indirection cell contain fixup that has not been converted to real pointer yet.
++ // Does not need explicit base and thus can be used in non-DAC builds only.
+ FORCEINLINE BOOL IsTagged() const
+ {
+ LIMITED_METHOD_CONTRACT;
+- TADDR base = (TADDR) this;
+- return IsTagged(base);
++ return IsTagged((TADDR)this);
+ }
+ #endif // !DACCESS_COMPILE
+
+@@ -391,21 +420,14 @@ public:
+ }
+ #endif
+
+- // Returns the pointer to the indirection cell.
+- PTR_TYPE * GetValuePtr(TADDR base) const
+- {
+- LIMITED_METHOD_CONTRACT;
+- TADDR addr = base + m_delta;
+- _ASSERTE((addr & FIXUP_POINTER_INDIRECTION) != 0);
+- return dac_cast<DPTR(PTR_TYPE)>(addr - FIXUP_POINTER_INDIRECTION);
+- }
+-
+ #ifndef DACCESS_COMPILE
++ // Returns the pointer to the indirection cell.
+ PTR_TYPE * GetValuePtr() const
+ {
+ LIMITED_METHOD_CONTRACT;
+- TADDR base = (TADDR) this;
+- return GetValuePtr(base);
++ TADDR addr = ((TADDR)this) + m_delta;
++ _ASSERTE((addr & FIXUP_POINTER_INDIRECTION) != 0);
++ return (PTR_TYPE *)(addr - FIXUP_POINTER_INDIRECTION);
+ }
+ #endif // !DACCESS_COMPILE
+
+@@ -421,6 +443,48 @@ public:
+ return addr;
+ }
+
++ // Returns whether pointer is indirect. Assumes that the value is not NULL.
++ bool IsIndirectPtr(TADDR base) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ PRECONDITION(!IsNull());
++
++ TADDR addr = base + m_delta;
++
++ return (addr & FIXUP_POINTER_INDIRECTION) != 0;
++ }
++
++#ifndef DACCESS_COMPILE
++ // Returns whether pointer is indirect. Assumes that the value is not NULL.
++ // Does not need explicit base and thus can be used in non-DAC builds only.
++ bool IsIndirectPtr() const
++ {
++ LIMITED_METHOD_CONTRACT;
++ return IsIndirectPtr((TADDR)this);
++ }
++#endif
++
++ // Returns whether pointer is indirect. The value can be NULL.
++ bool IsIndirectPtrMaybeNull(TADDR base) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ if (m_delta == 0)
++ return false;
++
++ return IsIndirectPtr(base);
++ }
++
++#ifndef DACCESS_COMPILE
++ // Returns whether pointer is indirect. The value can be NULL.
++ // Does not need explicit base and thus can be used in non-DAC builds only.
++ bool IsIndirectPtrMaybeNull() const
++ {
++ LIMITED_METHOD_CONTRACT;
++ return IsIndirectPtrMaybeNull((TADDR)this);
++ }
++#endif
++
+ private:
+ #ifndef DACCESS_COMPILE
+ Volatile<TADDR> m_delta;
+@@ -453,10 +517,20 @@ public:
+ }
+
+ // Returns whether the indirection cell contain fixup that has not been converted to real pointer yet.
++ BOOL IsTagged(TADDR base) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return IsTagged();
++ }
++
++ // Returns whether the indirection cell contain fixup that has not been converted to real pointer yet.
+ BOOL IsTagged() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return m_ptr & 1;
++ TADDR addr = m_ptr;
++ if ((addr & FIXUP_POINTER_INDIRECTION) != 0)
++ return (*PTR_TADDR(addr - FIXUP_POINTER_INDIRECTION) & 1) != 0;
++ return FALSE;
+ }
+
+ // Returns value of the encoded pointer.
+@@ -466,12 +540,17 @@ public:
+ return dac_cast<PTR_TYPE>(m_ptr);
+ }
+
++#ifndef DACCESS_COMPILE
+ // Returns the pointer to the indirection cell.
+ PTR_TYPE * GetValuePtr() const
+ {
+- LIMITED_METHOD_DAC_CONTRACT;
++ LIMITED_METHOD_CONTRACT;
++ TADDR addr = m_ptr;
++ if ((addr & FIXUP_POINTER_INDIRECTION) != 0)
++ return (PTR_TYPE *)(addr - FIXUP_POINTER_INDIRECTION);
+ return (PTR_TYPE *)&m_ptr;
+ }
++#endif // !DACCESS_COMPILE
+
+ // Returns value of the encoded pointer. Assumes that the pointer is not NULL.
+ PTR_TYPE GetValue(TADDR base) const
+@@ -508,6 +587,42 @@ public:
+ return dac_cast<DPTR(PlainPointer<PTR_TYPE>)>(base)->GetValueMaybeNull(base);
+ }
+
++ // Returns whether pointer is indirect. Assumes that the value is not NULL.
++ bool IsIndirectPtr(TADDR base) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return (m_ptr & FIXUP_POINTER_INDIRECTION) != 0;
++ }
++
++#ifndef DACCESS_COMPILE
++ // Returns whether pointer is indirect. Assumes that the value is not NULL.
++ // Does not need explicit base and thus can be used in non-DAC builds only.
++ bool IsIndirectPtr() const
++ {
++ LIMITED_METHOD_CONTRACT;
++ return IsIndirectPtr((TADDR)this);
++ }
++#endif
++
++ // Returns whether pointer is indirect. The value can be NULL.
++ bool IsIndirectPtrMaybeNull(TADDR base) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return IsIndirectPtr(base);
++ }
++
++#ifndef DACCESS_COMPILE
++ // Returns whether pointer is indirect. The value can be NULL.
++ // Does not need explicit base and thus can be used in non-DAC builds only.
++ bool IsIndirectPtrMaybeNull() const
++ {
++ LIMITED_METHOD_CONTRACT;
++ return IsIndirectPtrMaybeNull((TADDR)this);
++ }
++#endif
++
+ #ifndef DACCESS_COMPILE
+ void SetValue(PTR_TYPE addr)
+ {
+diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
+index 3a85a52..43e2c14 100644
+--- a/src/vm/ceeload.cpp
++++ b/src/vm/ceeload.cpp
+@@ -10295,11 +10295,11 @@ void Module::RestoreMethodTablePointer(RelativeFixupPointer<PTR_MethodTable> * p
+
+ if (ppMT->IsTagged((TADDR)ppMT))
+ {
+- RestoreMethodTablePointerRaw(ppMT->GetValuePtr((TADDR)ppMT), pContainingModule, level);
++ RestoreMethodTablePointerRaw(ppMT->GetValuePtr(), pContainingModule, level);
+ }
+ else
+ {
+- ClassLoader::EnsureLoaded(ppMT->GetValue((TADDR)ppMT), level);
++ ClassLoader::EnsureLoaded(ppMT->GetValue(), level);
+ }
+ }
+
+@@ -10434,7 +10434,7 @@ PTR_Module Module::RestoreModulePointerIfLoaded(DPTR(RelativeFixupPointer<PTR_Mo
+ return ppModule->GetValue(dac_cast<TADDR>(ppModule));
+
+ #ifndef DACCESS_COMPILE
+- PTR_Module * ppValue = ppModule->GetValuePtr(dac_cast<TADDR>(ppModule));
++ PTR_Module * ppValue = ppModule->GetValuePtr();
+
+ // Ensure that the compiler won't fetch the value twice
+ TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppValue);
+@@ -10487,7 +10487,7 @@ void Module::RestoreModulePointer(RelativeFixupPointer<PTR_Module> * ppModule, M
+ if (!ppModule->IsTagged((TADDR)ppModule))
+ return;
+
+- PTR_Module * ppValue = ppModule->GetValuePtr((TADDR)ppModule);
++ PTR_Module * ppValue = ppModule->GetValuePtr();
+
+ // Ensure that the compiler won't fetch the value twice
+ TADDR fixup = VolatileLoadWithoutBarrier((TADDR *)ppValue);
+@@ -10641,7 +10641,7 @@ void Module::RestoreTypeHandlePointer(RelativeFixupPointer<TypeHandle> * pHandle
+
+ if (pHandle->IsTagged((TADDR)pHandle))
+ {
+- RestoreTypeHandlePointerRaw(pHandle->GetValuePtr((TADDR)pHandle), pContainingModule, level);
++ RestoreTypeHandlePointerRaw(pHandle->GetValuePtr(), pContainingModule, level);
+ }
+ else
+ {
+@@ -10743,7 +10743,7 @@ void Module::RestoreMethodDescPointer(RelativeFixupPointer<PTR_MethodDesc> * ppM
+
+ if (ppMD->IsTagged((TADDR)ppMD))
+ {
+- RestoreMethodDescPointerRaw(ppMD->GetValuePtr((TADDR)ppMD), pContainingModule, level);
++ RestoreMethodDescPointerRaw(ppMD->GetValuePtr(), pContainingModule, level);
+ }
+ else
+ {
+diff --git a/src/vm/debughelp.cpp b/src/vm/debughelp.cpp
+index 3e66f14..0769feb 100644
+--- a/src/vm/debughelp.cpp
++++ b/src/vm/debughelp.cpp
+@@ -318,7 +318,7 @@ MethodDesc* AsMethodDesc(size_t addr)
+ // extra indirection if the address is tagged (the low bit is set).
+ // That could AV if we don't check it first.
+
+- if (!ppMT->IsTagged((TADDR)ppMT) || isMemoryReadable((TADDR)ppMT->GetValuePtr((TADDR)ppMT), sizeof(MethodTable*)))
++ if (!ppMT->IsTagged((TADDR)ppMT) || isMemoryReadable((TADDR)ppMT->GetValuePtr(), sizeof(MethodTable*)))
+ {
+ if (AsMethodTable((size_t)RelativeFixupPointer<PTR_MethodTable>::GetValueAtPtr((TADDR)ppMT)) != 0)
+ {
+--
+2.7.4
+
diff --git a/packaging/0022-Remove-relocations-for-InterfaceInfo_t-m_pMethodTabl.patch b/packaging/0022-Remove-relocations-for-InterfaceInfo_t-m_pMethodTabl.patch
new file mode 100644
index 0000000000..3f39a249f0
--- /dev/null
+++ b/packaging/0022-Remove-relocations-for-InterfaceInfo_t-m_pMethodTabl.patch
@@ -0,0 +1,138 @@
+From 53ec75f90fc77f7d03309a6aaeae182d875148d4 Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Thu, 22 Jun 2017 20:24:54 +0300
+Subject: [PATCH 22/32] Remove relocations for InterfaceInfo_t::m_pMethodTable
+ for Linux ARM
+
+---
+ src/vm/array.cpp | 7 +++++--
+ src/vm/methodtable.cpp | 33 +++++++++++++++++++++++++++------
+ src/vm/methodtable.h | 23 +++++++++++++++++++----
+ 3 files changed, 51 insertions(+), 12 deletions(-)
+
+diff --git a/src/vm/array.cpp b/src/vm/array.cpp
+index d679294..3f5a8aa 100644
+--- a/src/vm/array.cpp
++++ b/src/vm/array.cpp
+@@ -509,8 +509,11 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy
+ #endif // !defined(_WIN64) && (DATA_ALIGNMENT > 4)
+ pMT->SetBaseSize(baseSize);
+ // Because of array method table persisting, we need to copy the map
+- memcpy(pMTHead + imapOffset, pParentClass->GetInterfaceMap(),
+- pParentClass->GetNumInterfaces() * sizeof(InterfaceInfo_t));
++ for (unsigned index = 0; index < pParentClass->GetNumInterfaces(); ++index)
++ {
++ InterfaceInfo_t *pIntInfo = (InterfaceInfo_t *) (pMTHead + imapOffset + index * sizeof(InterfaceInfo_t));
++ pIntInfo->SetMethodTable((pParentClass->GetInterfaceMap() + index)->GetMethodTable());
++ }
+ pMT->SetInterfaceMap(pParentClass->GetNumInterfaces(), (InterfaceInfo_t *)(pMTHead + imapOffset));
+
+ // Copy down flags for these interfaces as well. This is simplified a bit since we know that System.Array
+diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
+index e219bb7..b849746 100644
+--- a/src/vm/methodtable.cpp
++++ b/src/vm/methodtable.cpp
+@@ -1237,7 +1237,12 @@ void MethodTable::AddDynamicInterface(MethodTable *pItfMT)
+ if (TotalNumInterfaces > 0) {
+ InterfaceInfo_t *pInterfaceMap = GetInterfaceMap();
+ PREFIX_ASSUME(pInterfaceMap != NULL);
+- memcpy(pNewItfMap, pInterfaceMap, TotalNumInterfaces * sizeof(InterfaceInfo_t));
++
++ for (unsigned index = 0; index < TotalNumInterfaces; ++index)
++ {
++ InterfaceInfo_t *pIntInfo = (InterfaceInfo_t *) (pNewItfMap + index);
++ pIntInfo->SetMethodTable((pInterfaceMap + index)->GetMethodTable());
++ }
+ }
+
+ // Add the new interface at the end of the map.
+@@ -4285,16 +4290,32 @@ void MethodTable::Save(DataImage *image, DWORD profilingFlags)
+ // Dynamic interface maps have an additional DWORD_PTR preceding the InterfaceInfo_t array
+ if (HasDynamicInterfaceMap())
+ {
+- ZapStoredStructure * pInterfaceMapNode = image->StoreInternedStructure(((DWORD_PTR *)GetInterfaceMap()) - 1,
+- GetInterfaceMapSize(),
+- DataImage::ITEM_INTERFACE_MAP);
+-
++ ZapStoredStructure * pInterfaceMapNode;
++ if (decltype(InterfaceInfo_t::m_pMethodTable)::isRelative)
++ {
++ pInterfaceMapNode = image->StoreStructure(((DWORD_PTR *)GetInterfaceMap()) - 1,
++ GetInterfaceMapSize(),
++ DataImage::ITEM_INTERFACE_MAP);
++ }
++ else
++ {
++ pInterfaceMapNode = image->StoreInternedStructure(((DWORD_PTR *)GetInterfaceMap()) - 1,
++ GetInterfaceMapSize(),
++ DataImage::ITEM_INTERFACE_MAP);
++ }
+ image->BindPointer(GetInterfaceMap(), pInterfaceMapNode, sizeof(DWORD_PTR));
+ }
+ else
+ #endif // FEATURE_COMINTEROP
+ {
+- image->StoreInternedStructure(GetInterfaceMap(), GetInterfaceMapSize(), DataImage::ITEM_INTERFACE_MAP);
++ if (decltype(InterfaceInfo_t::m_pMethodTable)::isRelative)
++ {
++ image->StoreStructure(GetInterfaceMap(), GetInterfaceMapSize(), DataImage::ITEM_INTERFACE_MAP);
++ }
++ else
++ {
++ image->StoreInternedStructure(GetInterfaceMap(), GetInterfaceMapSize(), DataImage::ITEM_INTERFACE_MAP);
++ }
+ }
+
+ SaveExtraInterfaceInfo(image);
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index f8b34ae..ef0cb44 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -111,25 +111,40 @@ struct InterfaceInfo_t
+ friend class NativeImageDumper;
+ #endif
+
+- FixupPointer<PTR_MethodTable> m_pMethodTable; // Method table of the interface
++ // Method table of the interface
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ RelativeFixupPointer<PTR_MethodTable> m_pMethodTable;
++#else
++ FixupPointer<PTR_MethodTable> m_pMethodTable;
++#endif
+
+ public:
+ FORCEINLINE PTR_MethodTable GetMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+- return m_pMethodTable.GetValue();
++ return ReadPointerMaybeNull(this, &InterfaceInfo_t::m_pMethodTable);
+ }
+
+ #ifndef DACCESS_COMPILE
+ void SetMethodTable(MethodTable * pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pMethodTable.SetValue(pMT);
++ m_pMethodTable.SetValueMaybeNull(pMT);
+ }
+
+ // Get approximate method table. This is used by the type loader before the type is fully loaded.
+ PTR_MethodTable GetApproxMethodTable(Module * pContainingModule);
+-#endif
++#endif // !DACCESS_COMPILE
++
++#ifndef DACCESS_COMPILE
++ InterfaceInfo_t(InterfaceInfo_t &right)
++ {
++ m_pMethodTable.SetValueMaybeNull(right.m_pMethodTable.GetValueMaybeNull());
++ }
++#else // !DACCESS_COMPILE
++private:
++ InterfaceInfo_t(InterfaceInfo_t &right);
++#endif // !DACCESS_COMPILE
+ }; // struct InterfaceInfo_t
+
+ typedef DPTR(InterfaceInfo_t) PTR_InterfaceInfo;
+--
+2.7.4
+
diff --git a/packaging/0023-Remove-relocations-for-MethodTable-m_pWriteableData-.patch b/packaging/0023-Remove-relocations-for-MethodTable-m_pWriteableData-.patch
new file mode 100644
index 0000000000..f8b8321f3f
--- /dev/null
+++ b/packaging/0023-Remove-relocations-for-MethodTable-m_pWriteableData-.patch
@@ -0,0 +1,207 @@
+From 74f3dd15eada76e26fd74c2ec8db5076a7d4b86f Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Thu, 22 Jun 2017 20:39:03 +0300
+Subject: [PATCH 23/32] Remove relocations for MethodTable::m_pWriteableData
+ for Linux ARM
+
+---
+ src/debug/daccess/nidump.cpp | 2 +-
+ src/vm/methodtable.cpp | 26 ++++++++++++++------------
+ src/vm/methodtable.h | 23 +++++++++++++++--------
+ src/vm/methodtable.inl | 2 +-
+ src/vm/methodtablebuilder.cpp | 2 +-
+ 5 files changed, 32 insertions(+), 23 deletions(-)
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index 673aa39..04d610e 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -7045,7 +7045,7 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+ DPtrToPreferredAddr(mt->GetLoaderModule()),
+ MethodTable, METHODTABLES );
+
+- PTR_MethodTableWriteableData wd = mt->m_pWriteableData;
++ PTR_MethodTableWriteableData wd = ReadPointer((MethodTable *)mt, &MethodTable::m_pWriteableData);
+ _ASSERTE(wd != NULL);
+ DisplayStartStructureWithOffset( m_pWriteableData, DPtrToPreferredAddr(wd),
+ sizeof(*wd), MethodTable, METHODTABLES );
+diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
+index b849746..e93f63d 100644
+--- a/src/vm/methodtable.cpp
++++ b/src/vm/methodtable.cpp
+@@ -4832,8 +4832,8 @@ void MethodTable::Fixup(DataImage *image)
+ }
+
+ _ASSERTE(GetWriteableData());
+- image->FixupPointerField(this, offsetof(MethodTable, m_pWriteableData));
+- m_pWriteableData->Fixup(image, this, needsRestore);
++ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pWriteableData);
++ m_pWriteableData.GetValue()->Fixup(image, this, needsRestore);
+
+ #ifdef FEATURE_COMINTEROP
+ if (HasGuidInfo())
+@@ -5074,12 +5074,12 @@ void MethodTable::Fixup(DataImage *image)
+
+ if (NeedsCrossModuleGenericsStaticsInfo())
+ {
+- MethodTableWriteableData * pNewWriteableData = (MethodTableWriteableData *)image->GetImagePointer(m_pWriteableData);
++ MethodTableWriteableData * pNewWriteableData = (MethodTableWriteableData *)image->GetImagePointer(m_pWriteableData.GetValue());
+ CrossModuleGenericsStaticsInfo * pNewCrossModuleGenericsStaticsInfo = pNewWriteableData->GetCrossModuleGenericsStaticsInfo();
+
+ pNewCrossModuleGenericsStaticsInfo->m_DynamicTypeID = pInfo->m_DynamicTypeID;
+
+- image->ZeroPointerField(m_pWriteableData, sizeof(MethodTableWriteableData) + offsetof(CrossModuleGenericsStaticsInfo, m_pModuleForStatics));
++ image->ZeroPointerField(m_pWriteableData.GetValue(), sizeof(MethodTableWriteableData) + offsetof(CrossModuleGenericsStaticsInfo, m_pModuleForStatics));
+
+ pNewMT->SetFlag(enum_flag_StaticsMask_IfGenericsThenCrossModule);
+ }
+@@ -9194,9 +9194,10 @@ MethodTable::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ DacEnumMemoryRegion(dac_cast<TADDR>(it.GetIndirectionSlot()), it.GetSize());
+ }
+
+- if (m_pWriteableData.IsValid())
++ PTR_MethodTableWriteableData pWriteableData = ReadPointer(this, &MethodTable::m_pWriteableData);
++ if (pWriteableData.IsValid())
+ {
+- m_pWriteableData.EnumMem();
++ pWriteableData.EnumMem();
+ }
+
+ if (flags != CLRDATA_ENUM_MEM_MINI && flags != CLRDATA_ENUM_MEM_TRIAGE)
+@@ -9659,8 +9660,6 @@ bool MethodTable::ClassRequiresUnmanagedCodeCheck()
+ return false;
+ }
+
+-#endif // !DACCESS_COMPILE
+-
+
+
+ BOOL MethodTable::Validate()
+@@ -9670,13 +9669,14 @@ BOOL MethodTable::Validate()
+ ASSERT_AND_CHECK(SanityCheck());
+
+ #ifdef _DEBUG
+- if (m_pWriteableData == NULL)
++ if (m_pWriteableData.IsNull())
+ {
+ _ASSERTE(IsAsyncPinType());
+ return TRUE;
+ }
+
+- DWORD dwLastVerifiedGCCnt = m_pWriteableData->m_dwLastVerifedGCCnt;
++ MethodTableWriteableData *pWriteableData = m_pWriteableData.GetValue();
++ DWORD dwLastVerifiedGCCnt = pWriteableData->m_dwLastVerifedGCCnt;
+ // Here we used to assert that (dwLastVerifiedGCCnt <= GCHeapUtilities::GetGCHeap()->GetGcCount()) but
+ // this is no longer true because with background gc. Since the purpose of having
+ // m_dwLastVerifedGCCnt is just to only verify the same method table once for each GC
+@@ -9707,13 +9707,15 @@ BOOL MethodTable::Validate()
+ #ifdef _DEBUG
+ // It is not a fatal error to fail the update the counter. We will run slower and retry next time,
+ // but the system will function properly.
+- if (EnsureWritablePagesNoThrow(m_pWriteableData, sizeof(MethodTableWriteableData)))
+- m_pWriteableData->m_dwLastVerifedGCCnt = GCHeapUtilities::GetGCHeap()->GetGcCount();
++ if (EnsureWritablePagesNoThrow(pWriteableData, sizeof(MethodTableWriteableData)))
++ pWriteableData->m_dwLastVerifedGCCnt = GCHeapUtilities::GetGCHeap()->GetGcCount();
+ #endif //_DEBUG
+
+ return TRUE;
+ }
+
++#endif // !DACCESS_COMPILE
++
+ NOINLINE BYTE *MethodTable::GetLoaderAllocatorObjectForGC()
+ {
+ WRAPPER_NO_CONTRACT;
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index ef0cb44..63d35e6 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -3148,36 +3148,39 @@ public:
+ // Private part of MethodTable
+ // ------------------------------------------------------------------
+
++#ifndef DACCESS_COMPILE
+ inline void SetWriteableData(PTR_MethodTableWriteableData pMTWriteableData)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(pMTWriteableData);
+- m_pWriteableData = pMTWriteableData;
++ m_pWriteableData.SetValue(pMTWriteableData);
+ }
+-
++#endif
++
+ inline PTR_Const_MethodTableWriteableData GetWriteableData() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ g_IBCLogger.LogMethodTableWriteableDataAccess(this);
+- return m_pWriteableData;
++ return GetWriteableData_NoLogging();
+ }
+
+ inline PTR_Const_MethodTableWriteableData GetWriteableData_NoLogging() const
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return m_pWriteableData;
++ return ReadPointer(this, &MethodTable::m_pWriteableData);
+ }
+
+ inline PTR_MethodTableWriteableData GetWriteableDataForWrite()
+ {
+- LIMITED_METHOD_CONTRACT;
++ LIMITED_METHOD_DAC_CONTRACT;
+ g_IBCLogger.LogMethodTableWriteableDataWriteAccess(this);
+- return m_pWriteableData;
++ return GetWriteableDataForWrite_NoLogging();
+ }
+
+ inline PTR_MethodTableWriteableData GetWriteableDataForWrite_NoLogging()
+ {
+- return m_pWriteableData;
++ LIMITED_METHOD_DAC_CONTRACT;
++ return ReadPointer(this, &MethodTable::m_pWriteableData);
+ }
+
+ //-------------------------------------------------------------------
+@@ -4076,7 +4079,11 @@ private:
+
+ RelativePointer<PTR_Module> m_pLoaderModule; // LoaderModule. It is equal to the ZapModule in ngened images
+
+- PTR_MethodTableWriteableData m_pWriteableData;
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ RelativePointer<PTR_MethodTableWriteableData> m_pWriteableData;
++#else
++ PlainPointer<PTR_MethodTableWriteableData> m_pWriteableData;
++#endif
+
+ // The value of lowest two bits describe what the union contains
+ enum LowBits {
+diff --git a/src/vm/methodtable.inl b/src/vm/methodtable.inl
+index eb1abb0..4c808ee 100644
+--- a/src/vm/methodtable.inl
++++ b/src/vm/methodtable.inl
+@@ -1737,7 +1737,7 @@ FORCEINLINE PTR_Module MethodTable::GetGenericsStaticsModuleAndID(DWORD * pID)
+ _ASSERTE(!IsStringOrArray());
+ if (m_dwFlags & enum_flag_StaticsMask_IfGenericsThenCrossModule)
+ {
+- CrossModuleGenericsStaticsInfo *pInfo = m_pWriteableData->GetCrossModuleGenericsStaticsInfo();
++ CrossModuleGenericsStaticsInfo *pInfo = ReadPointer(this, &MethodTable::m_pWriteableData)->GetCrossModuleGenericsStaticsInfo();
+ _ASSERTE(FitsIn<DWORD>(pInfo->m_DynamicTypeID) || pInfo->m_DynamicTypeID == (SIZE_T)-1);
+ *pID = static_cast<DWORD>(pInfo->m_DynamicTypeID);
+ return pInfo->m_pModuleForStatics;
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index e8f3b9c..5d77463 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -10215,7 +10215,7 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
+ }
+
+ #ifdef _DEBUG
+- pMT->m_pWriteableData->m_dwLastVerifedGCCnt = (DWORD)-1;
++ pMT->m_pWriteableData.GetValue()->m_dwLastVerifedGCCnt = (DWORD)-1;
+ #endif // _DEBUG
+
+ RETURN(pMT);
+--
+2.7.4
+
diff --git a/packaging/0024-Remove-relocations-for-MethodTable-m_pPerInstInfo-fo.patch b/packaging/0024-Remove-relocations-for-MethodTable-m_pPerInstInfo-fo.patch
new file mode 100644
index 0000000000..4bc117b4ed
--- /dev/null
+++ b/packaging/0024-Remove-relocations-for-MethodTable-m_pPerInstInfo-fo.patch
@@ -0,0 +1,541 @@
+From ca3a3d9695301697d9011f4187eefaa05595f9e7 Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Thu, 12 Apr 2018 12:49:39 +0300
+Subject: [PATCH 24/32] Remove relocations for MethodTable::m_pPerInstInfo for
+ Linux ARM
+
+FIX: fix No.4, manually applied patch due to code differencies
+---
+ .../superpmi/superpmi-shared/methodcontext.cpp | 6 +++
+ .../superpmi/superpmi-shared/methodcontext.h | 1 +
+ src/debug/daccess/nidump.cpp | 6 ++-
+ src/inc/corinfo.h | 7 ++++
+ src/jit/importer.cpp | 6 +--
+ src/vm/ceeload.cpp | 11 ++++-
+ src/vm/class.cpp | 7 +++-
+ src/vm/genericdict.cpp | 2 +-
+ src/vm/generics.cpp | 5 ++-
+ src/vm/jitinterface.cpp | 13 ++++++
+ src/vm/methodtable.cpp | 47 +++++++++++++++++-----
+ src/vm/methodtable.h | 29 +++++++++----
+ src/vm/methodtable.inl | 2 +-
+ src/vm/methodtablebuilder.cpp | 7 ++--
+ src/vm/prestub.cpp | 13 ++++++
+ 15 files changed, 129 insertions(+), 33 deletions(-)
+
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+index 4c5fb61..4406b85 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+@@ -1574,6 +1574,7 @@ void MethodContext::recGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ value.stubLookup.runtimeLookup.testForNull = (DWORD)pResult->stubLookup.runtimeLookup.testForNull;
+ value.stubLookup.runtimeLookup.testForFixup = (DWORD)pResult->stubLookup.runtimeLookup.testForFixup;
+ value.stubLookup.runtimeLookup.indirectFirstOffset = (DWORD)pResult->stubLookup.runtimeLookup.indirectFirstOffset;
++ value.stubLookup.runtimeLookup.indirectSecondOffset = (DWORD)pResult->stubLookup.runtimeLookup.indirectSecondOffset;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.stubLookup.runtimeLookup.offsets[i] = (DWORDLONG)pResult->stubLookup.runtimeLookup.offsets[i];
+ }
+@@ -1585,6 +1586,7 @@ void MethodContext::recGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ value.stubLookup.runtimeLookup.testForNull = (DWORD)0;
+ value.stubLookup.runtimeLookup.testForFixup = (DWORD)0;
+ value.stubLookup.runtimeLookup.indirectFirstOffset = (DWORD)0;
++ value.stubLookup.runtimeLookup.indirectSecondOffset = (DWORD)0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.stubLookup.runtimeLookup.offsets[i] = (DWORDLONG)0;
+
+@@ -1764,6 +1766,7 @@ void MethodContext::repGetCallInfo(CORINFO_RESOLVED_TOKEN* pResolvedToken,
+ pResult->stubLookup.runtimeLookup.testForNull = value.stubLookup.runtimeLookup.testForNull != 0;
+ pResult->stubLookup.runtimeLookup.testForFixup = value.stubLookup.runtimeLookup.testForFixup != 0;
+ pResult->stubLookup.runtimeLookup.indirectFirstOffset = value.stubLookup.runtimeLookup.indirectFirstOffset != 0;
++ pResult->stubLookup.runtimeLookup.indirectSecondOffset = value.stubLookup.runtimeLookup.indirectSecondOffset != 0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ pResult->stubLookup.runtimeLookup.offsets[i] = (SIZE_T)value.stubLookup.runtimeLookup.offsets[i];
+ }
+@@ -3222,6 +3225,7 @@ void MethodContext::recEmbedGenericHandle(CORINFO_RESOLVED_TOKEN* pResolve
+ value.lookup.runtimeLookup.testForNull = (DWORD)pResult->lookup.runtimeLookup.testForNull;
+ value.lookup.runtimeLookup.testForFixup = (DWORD)pResult->lookup.runtimeLookup.testForFixup;
+ value.lookup.runtimeLookup.indirectFirstOffset = (DWORD)pResult->lookup.runtimeLookup.indirectFirstOffset;
++ value.lookup.runtimeLookup.indirectSecondOffset = (DWORD)pResult->lookup.runtimeLookup.indirectSecondOffset;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.lookup.runtimeLookup.offsets[i] = (DWORDLONG)pResult->lookup.runtimeLookup.offsets[i];
+ }
+@@ -3233,6 +3237,7 @@ void MethodContext::recEmbedGenericHandle(CORINFO_RESOLVED_TOKEN* pResolve
+ value.lookup.runtimeLookup.testForNull = (DWORD)0;
+ value.lookup.runtimeLookup.testForFixup = (DWORD)0;
+ value.lookup.runtimeLookup.indirectFirstOffset = (DWORD)0;
++ value.lookup.runtimeLookup.indirectSecondOffset = (DWORD)0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ value.lookup.runtimeLookup.offsets[i] = (DWORDLONG)0;
+ // copy the constLookup view of the union
+@@ -3311,6 +3316,7 @@ void MethodContext::repEmbedGenericHandle(CORINFO_RESOLVED_TOKEN* pResolve
+ pResult->lookup.runtimeLookup.testForNull = value.lookup.runtimeLookup.testForNull != 0;
+ pResult->lookup.runtimeLookup.testForFixup = value.lookup.runtimeLookup.testForFixup != 0;
+ pResult->lookup.runtimeLookup.indirectFirstOffset = value.lookup.runtimeLookup.indirectFirstOffset != 0;
++ pResult->lookup.runtimeLookup.indirectSecondOffset = value.lookup.runtimeLookup.indirectSecondOffset != 0;
+ for (int i = 0; i < CORINFO_MAXINDIRECTIONS; i++)
+ pResult->lookup.runtimeLookup.offsets[i] = (size_t)value.lookup.runtimeLookup.offsets[i];
+ }
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+index 4887522..53227e4 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+@@ -241,6 +241,7 @@ public:
+ DWORD testForFixup;
+ DWORDLONG offsets[CORINFO_MAXINDIRECTIONS];
+ DWORD indirectFirstOffset;
++ DWORD indirectSecondOffset;
+ };
+ struct Agnostic_CORINFO_CONST_LOOKUP
+ {
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index 04d610e..d43e9f9 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -5106,7 +5106,9 @@ void NativeImageDumper::MethodTableToString( PTR_MethodTable mt, SString& buf )
+ {
+ numDicts = (DWORD)CountDictionariesInClass(token, dependency->pImport);
+ }
+- PTR_Dictionary dictionary( mt->GetPerInstInfo()[numDicts-1] );
++
++ TADDR base = dac_cast<TADDR>(&(mt->GetPerInstInfo()[numDicts-1]));
++ PTR_Dictionary dictionary( MethodTable::PerInstInfoElem_t::GetValueAtPtr(base) );
+ unsigned numArgs = mt->GetNumGenericArgs();
+
+ DictionaryToArgString( dictionary, numArgs, buf );
+@@ -7092,7 +7094,7 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+ DisplayEndStructure( METHODTABLES ); //GenericsDictInfo
+
+
+- DPTR(PTR_Dictionary) perInstInfo = mt->GetPerInstInfo();
++ DPTR(MethodTable::PerInstInfoElem_t) perInstInfo = mt->GetPerInstInfo();
+
+ DisplayStartStructure( "PerInstInfo",
+ DPtrToPreferredAddr(perInstInfo),
+diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
+index a6acd71..58fcdf4 100644
+--- a/src/inc/corinfo.h
++++ b/src/inc/corinfo.h
+@@ -1331,6 +1331,13 @@ struct CORINFO_RUNTIME_LOOKUP
+ // 1 means that value stored at first offset (offsets[0]) from pointer is offset1, and the next pointer is
+ // stored at pointer+offsets[0]+offset1.
+ bool indirectFirstOffset;
++
++ // If set, second offset is indirect.
++ // 0 means that value stored at second offset (offsets[1]) from pointer is next pointer, to which the next offset
++ // (offsets[2]) is added and so on.
++ // 1 means that value stored at second offset (offsets[1]) from pointer is offset2, and the next pointer is
++ // stored at pointer+offsets[1]+offset2.
++ bool indirectSecondOffset;
+ } ;
+
+ // Result of calling embedGenericHandle
+diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
+index c5f2970..62f1c13 100644
+--- a/src/jit/importer.cpp
++++ b/src/jit/importer.cpp
+@@ -1980,10 +1980,10 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedTok
+ // Applied repeated indirections
+ for (WORD i = 0; i < pRuntimeLookup->indirections; i++)
+ {
+- if (i == 1 && pRuntimeLookup->indirectFirstOffset)
++ if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
+ {
+ indOffTree = impCloneExpr(slotPtrTree, &slotPtrTree, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
+- nullptr DEBUGARG("impRuntimeLookup indirectFirstOffset"));
++ nullptr DEBUGARG("impRuntimeLookup indirectOffset"));
+ }
+
+ if (i != 0)
+@@ -1993,7 +1993,7 @@ GenTreePtr Compiler::impRuntimeLookupToTree(CORINFO_RESOLVED_TOKEN* pResolvedTok
+ slotPtrTree->gtFlags |= GTF_IND_INVARIANT;
+ }
+
+- if (i == 1 && pRuntimeLookup->indirectFirstOffset)
++ if ((i == 1 && pRuntimeLookup->indirectFirstOffset) || (i == 2 && pRuntimeLookup->indirectSecondOffset))
+ {
+ slotPtrTree = gtNewOperNode(GT_ADD, TYP_I_IMPL, indOffTree, slotPtrTree);
+ }
+diff --git a/src/vm/ceeload.cpp b/src/vm/ceeload.cpp
+index 43e2c14..9e5a525 100644
+--- a/src/vm/ceeload.cpp
++++ b/src/vm/ceeload.cpp
+@@ -9325,13 +9325,20 @@ void Module::PlaceType(DataImage *image, TypeHandle th, DWORD profilingFlags)
+ {
+ if (pMT->HasPerInstInfo())
+ {
+- Dictionary ** pPerInstInfo = pMT->GetPerInstInfo();
++ DPTR(MethodTable::PerInstInfoElem_t) pPerInstInfo = pMT->GetPerInstInfo();
+
+ BOOL fIsEagerBound = pMT->CanEagerBindToParentDictionaries(image, NULL);
+
+ if (fIsEagerBound)
+ {
+- image->PlaceInternedStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
++ if (MethodTable::PerInstInfoElem_t::isRelative)
++ {
++ image->PlaceStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_READONLY_HOT);
++ }
++ else
++ {
++ image->PlaceInternedStructureForAddress(pPerInstInfo, CORCOMPILE_SECTION_READONLY_SHARED_HOT, CORCOMPILE_SECTION_READONLY_HOT);
++ }
+ }
+ else
+ {
+diff --git a/src/vm/class.cpp b/src/vm/class.cpp
+index 6697b23..c1519a2 100644
+--- a/src/vm/class.cpp
++++ b/src/vm/class.cpp
+@@ -908,8 +908,11 @@ ClassLoader::LoadExactParentAndInterfacesTransitively(MethodTable *pMT)
+ DWORD nDicts = pParentMT->GetNumDicts();
+ for (DWORD iDict = 0; iDict < nDicts; iDict++)
+ {
+- if (pMT->GetPerInstInfo()[iDict] != pParentMT->GetPerInstInfo()[iDict])
+- *EnsureWritablePages(&pMT->GetPerInstInfo()[iDict]) = pParentMT->GetPerInstInfo()[iDict];
++ if (pMT->GetPerInstInfo()[iDict].GetValueMaybeNull() != pParentMT->GetPerInstInfo()[iDict].GetValueMaybeNull())
++ {
++ EnsureWritablePages(&pMT->GetPerInstInfo()[iDict]);
++ pMT->GetPerInstInfo()[iDict].SetValueMaybeNull(pParentMT->GetPerInstInfo()[iDict].GetValueMaybeNull());
++ }
+ }
+ }
+
+diff --git a/src/vm/genericdict.cpp b/src/vm/genericdict.cpp
+index c93e583..5fad30f 100644
+--- a/src/vm/genericdict.cpp
++++ b/src/vm/genericdict.cpp
+@@ -742,7 +742,7 @@ Dictionary::PopulateEntry(
+ }
+
+ // MethodTable is expected to be normalized
+- _ASSERTE(pDictionary == pMT->GetPerInstInfo()[dictionaryIndex]);
++ _ASSERTE(pDictionary == pMT->GetPerInstInfo()[dictionaryIndex].GetValueMaybeNull());
+ }
+ else
+ {
+diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
+index 63d95a0..650caef 100644
+--- a/src/vm/generics.cpp
++++ b/src/vm/generics.cpp
+@@ -499,7 +499,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
+ _ASSERTE(pOldMT->HasPerInstInfo());
+
+ // Fill in per-inst map pointer (which points to the array of generic dictionary pointers)
+- pMT->SetPerInstInfo ((Dictionary**) (pMemory + cbMT + cbOptional + cbIMap + sizeof(GenericsDictInfo)));
++ pMT->SetPerInstInfo((MethodTable::PerInstInfoElem_t *) (pMemory + cbMT + cbOptional + cbIMap + sizeof(GenericsDictInfo)));
+ _ASSERTE(FitsIn<WORD>(pOldMT->GetNumDicts()));
+ _ASSERTE(FitsIn<WORD>(pOldMT->GetNumGenericArgs()));
+ pMT->SetDictInfo(static_cast<WORD>(pOldMT->GetNumDicts()), static_cast<WORD>(pOldMT->GetNumGenericArgs()));
+@@ -508,7 +508,8 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
+ // The others are filled in by LoadExactParents which copied down any inherited generic
+ // dictionary pointers.
+ Dictionary * pDict = (Dictionary*) (pMemory + cbMT + cbOptional + cbIMap + cbPerInst);
+- *(pMT->GetPerInstInfo() + (pOldMT->GetNumDicts()-1)) = pDict;
++ MethodTable::PerInstInfoElem_t *pPInstInfo = (MethodTable::PerInstInfoElem_t *) (pMT->GetPerInstInfo() + (pOldMT->GetNumDicts()-1));
++ pPInstInfo->SetValueMaybeNull(pDict);
+
+ // Fill in the instantiation section of the generic dictionary. The remainder of the
+ // generic dictionary will be zeroed, which is the correct initial state.
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index 52db7c5..e0adf87 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -3149,6 +3149,7 @@ void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entr
+ pResult->signature = NULL;
+
+ pResult->indirectFirstOffset = 0;
++ pResult->indirectSecondOffset = 0;
+
+ // Unless we decide otherwise, just do the lookup via a helper function
+ pResult->indirections = CORINFO_USEHELPER;
+@@ -3363,6 +3364,12 @@ void CEEInfo::ComputeRuntimeLookupForSharedGenericToken(DictionaryEntryKind entr
+ IfFailThrow(sigptr.GetData(&data));
+ pResult->offsets[2] = sizeof(TypeHandle) * data;
+
++ if (MethodTable::IsPerInstInfoRelative())
++ {
++ pResult->indirectFirstOffset = 1;
++ pResult->indirectSecondOffset = 1;
++ }
++
+ return;
+ }
+ else if (type == ELEMENT_TYPE_GENERICINST &&
+@@ -3610,6 +3617,12 @@ NoSpecialCase:
+
+ // Next indirect through the dictionary appropriate to this instantiated type
+ pResult->offsets[1] = sizeof(TypeHandle*) * (pContextMT->GetNumDicts() - 1);
++
++ if (MethodTable::IsPerInstInfoRelative())
++ {
++ pResult->indirectFirstOffset = 1;
++ pResult->indirectSecondOffset = 1;
++ }
+ }
+ }
+ }
+diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
+index e93f63d..4c1746e 100644
+--- a/src/vm/methodtable.cpp
++++ b/src/vm/methodtable.cpp
+@@ -4332,7 +4332,14 @@ void MethodTable::Save(DataImage *image, DWORD profilingFlags)
+ ZapStoredStructure * pPerInstInfoNode;
+ if (CanEagerBindToParentDictionaries(image, NULL))
+ {
+- pPerInstInfoNode = image->StoreInternedStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY);
++ if (PerInstInfoElem_t::isRelative)
++ {
++ pPerInstInfoNode = image->StoreStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY);
++ }
++ else
++ {
++ pPerInstInfoNode = image->StoreInternedStructure((BYTE *)GetPerInstInfo() - sizeof(GenericsDictInfo), GetPerInstInfoSize() + sizeof(GenericsDictInfo), DataImage::ITEM_DICTIONARY);
++ }
+ }
+ else
+ {
+@@ -4675,14 +4682,21 @@ BOOL MethodTable::IsWriteable()
+ // target module. Thus we want to call CanEagerBindToMethodTable
+ // to check we can hardbind to the containing structure.
+ static
+-void HardBindOrClearDictionaryPointer(DataImage *image, MethodTable *pMT, void * p, SSIZE_T offset)
++void HardBindOrClearDictionaryPointer(DataImage *image, MethodTable *pMT, void * p, SSIZE_T offset, bool isRelative)
+ {
+ WRAPPER_NO_CONTRACT;
+
+ if (image->CanEagerBindToMethodTable(pMT) &&
+ image->CanHardBindToZapModule(pMT->GetLoaderModule()))
+ {
+- image->FixupPointerField(p, offset);
++ if (isRelative)
++ {
++ image->FixupRelativePointerField(p, offset);
++ }
++ else
++ {
++ image->FixupPointerField(p, offset);
++ }
+ }
+ else
+ {
+@@ -5017,7 +5031,7 @@ void MethodTable::Fixup(DataImage *image)
+ if (HasPerInstInfo())
+ {
+ // Fixup the pointer to the per-inst table
+- image->FixupPointerField(this, offsetof(MethodTable, m_pPerInstInfo));
++ image->FixupPlainOrRelativePointerField(this, &MethodTable::m_pPerInstInfo);
+
+ for (MethodTable *pChain = this; pChain != NULL; pChain = pChain->GetParentMethodTable())
+ {
+@@ -5030,10 +5044,23 @@ void MethodTable::Fixup(DataImage *image)
+
+ // We special-case the dictionary for this method table because we must always
+ // hard bind to it even if it's not in its preferred zap module
++ size_t sizeDict = sizeof(PerInstInfoElem_t);
++
+ if (pChain == this)
+- image->FixupPointerField(GetPerInstInfo(), dictNum * sizeof(Dictionary *));
++ {
++ if (PerInstInfoElem_t::isRelative)
++ {
++ image->FixupRelativePointerField(GetPerInstInfo(), dictNum * sizeDict);
++ }
++ else
++ {
++ image->FixupPointerField(GetPerInstInfo(), dictNum * sizeDict);
++ }
++ }
+ else
+- HardBindOrClearDictionaryPointer(image, pChain, GetPerInstInfo(), dictNum * sizeof(Dictionary *));
++ {
++ HardBindOrClearDictionaryPointer(image, pChain, GetPerInstInfo(), dictNum * sizeDict, PerInstInfoElem_t::isRelative);
++ }
+ }
+ }
+ }
+@@ -6218,7 +6245,7 @@ BOOL MethodTable::IsWinRTObjectType()
+ //==========================================================================================
+ // Return a pointer to the dictionary for an instantiated type
+ // Return NULL if not instantiated
+-Dictionary* MethodTable::GetDictionary()
++PTR_Dictionary MethodTable::GetDictionary()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+@@ -6226,7 +6253,8 @@ Dictionary* MethodTable::GetDictionary()
+ {
+ // The instantiation for this class is stored in the type slots table
+ // *after* any inherited slots
+- return GetPerInstInfo()[GetNumDicts()-1];
++ TADDR base = dac_cast<TADDR>(&(GetPerInstInfo()[GetNumDicts()-1]));
++ return PerInstInfoElem_t::GetValueMaybeNullAtPtr(base);
+ }
+ else
+ {
+@@ -6243,7 +6271,8 @@ Instantiation MethodTable::GetInstantiation()
+ if (HasInstantiation())
+ {
+ PTR_GenericsDictInfo pDictInfo = GetGenericsDictInfo();
+- return Instantiation(GetPerInstInfo()[pDictInfo->m_wNumDicts-1]->GetInstantiation(), pDictInfo->m_wNumTyPars);
++ TADDR base = dac_cast<TADDR>(&(GetPerInstInfo()[pDictInfo->m_wNumDicts-1]));
++ return Instantiation(PerInstInfoElem_t::GetValueMaybeNullAtPtr(base)->GetInstantiation(), pDictInfo->m_wNumTyPars);
+ }
+ else
+ {
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index 63d35e6..81a9186 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -3008,12 +3008,20 @@ public:
+ // must have a dictionary entry. On the other hand, for instantiations shared with Dict<string,double> the opposite holds.
+ //
+
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ typedef RelativePointer<PTR_Dictionary> PerInstInfoElem_t;
++ typedef RelativePointer<DPTR(PerInstInfoElem_t)> PerInstInfo_t;
++#else
++ typedef PlainPointer<PTR_Dictionary> PerInstInfoElem_t;
++ typedef PlainPointer<DPTR(PerInstInfoElem_t)> PerInstInfo_t;
++#endif
++
+ // Return a pointer to the per-instantiation information. See field itself for comments.
+- DPTR(PTR_Dictionary) GetPerInstInfo()
++ DPTR(PerInstInfoElem_t) GetPerInstInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+ _ASSERTE(HasPerInstInfo());
+- return dac_cast<DPTR(PTR_Dictionary)>(m_pMultipurposeSlot1);
++ return ReadPointer(this, &MethodTable::m_pPerInstInfo);
+ }
+ BOOL HasPerInstInfo()
+ {
+@@ -3021,15 +3029,20 @@ public:
+ return GetFlag(enum_flag_HasPerInstInfo) && !IsArray();
+ }
+ #ifndef DACCESS_COMPILE
++ static inline bool IsPerInstInfoRelative()
++ {
++ LIMITED_METHOD_CONTRACT;
++ return decltype(m_pPerInstInfo)::isRelative;
++ }
+ static inline DWORD GetOffsetOfPerInstInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+ return offsetof(MethodTable, m_pPerInstInfo);
+ }
+- void SetPerInstInfo(Dictionary** pPerInstInfo)
++ void SetPerInstInfo(PerInstInfoElem_t *pPerInstInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pPerInstInfo = pPerInstInfo;
++ m_pPerInstInfo.SetValue(pPerInstInfo);
+ }
+ void SetDictInfo(WORD numDicts, WORD numTyPars)
+ {
+@@ -3049,7 +3062,7 @@ public:
+ // Get a pointer to the dictionary for this instantiated type
+ // (The instantiation is stored in the initial slots of the dictionary)
+ // If not instantiated, return NULL
+- Dictionary* GetDictionary();
++ PTR_Dictionary GetDictionary();
+
+ #ifdef FEATURE_PREJIT
+ //
+@@ -4123,9 +4136,9 @@ private:
+
+ union
+ {
+- PTR_Dictionary * m_pPerInstInfo;
+- TADDR m_ElementTypeHnd;
+- TADDR m_pMultipurposeSlot1;
++ PerInstInfo_t m_pPerInstInfo;
++ TADDR m_ElementTypeHnd;
++ TADDR m_pMultipurposeSlot1;
+ };
+ public:
+ union
+diff --git a/src/vm/methodtable.inl b/src/vm/methodtable.inl
+index 4c808ee..b69513d 100644
+--- a/src/vm/methodtable.inl
++++ b/src/vm/methodtable.inl
+@@ -1256,7 +1256,7 @@ inline BOOL MethodTable::HasExplicitSize()
+ inline DWORD MethodTable::GetPerInstInfoSize()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return GetNumDicts() * sizeof(TypeHandle*);
++ return GetNumDicts() * sizeof(PerInstInfoElem_t);
+ }
+
+ //==========================================================================================
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index 5d77463..792a19c 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -10050,7 +10050,7 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
+ if (dwNumDicts != 0)
+ {
+ cbTotalSize += sizeof(GenericsDictInfo);
+- cbTotalSize += S_SIZE_T(dwNumDicts) * S_SIZE_T(sizeof(TypeHandle*));
++ cbTotalSize += S_SIZE_T(dwNumDicts) * S_SIZE_T(sizeof(MethodTable::PerInstInfoElem_t));
+ cbTotalSize += cbInstAndDict;
+ }
+
+@@ -10203,14 +10203,15 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
+ // the dictionary pointers follow the interface map
+ if (dwNumDicts)
+ {
+- Dictionary** pPerInstInfo = (Dictionary**)(pData + offsetOfInstAndDict.Value() + sizeof(GenericsDictInfo));
++ MethodTable::PerInstInfoElem_t *pPerInstInfo = (MethodTable::PerInstInfoElem_t *)(pData + offsetOfInstAndDict.Value() + sizeof(GenericsDictInfo));
+
+ pMT->SetPerInstInfo ( pPerInstInfo);
+
+ // Fill in the dictionary for this type, if it's instantiated
+ if (cbInstAndDict)
+ {
+- *(pPerInstInfo + (dwNumDicts-1)) = (Dictionary*) (pPerInstInfo + dwNumDicts);
++ MethodTable::PerInstInfoElem_t *pPInstInfo = (MethodTable::PerInstInfoElem_t *)(pPerInstInfo + (dwNumDicts-1));
++ pPInstInfo->SetValueMaybeNull((Dictionary*) (pPerInstInfo + dwNumDicts));
+ }
+ }
+
+diff --git a/src/vm/prestub.cpp b/src/vm/prestub.cpp
+index 746e415..b31e8f7 100644
+--- a/src/vm/prestub.cpp
++++ b/src/vm/prestub.cpp
+@@ -2407,6 +2407,7 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
+ pResult->signature = NULL;
+
+ pResult->indirectFirstOffset = 0;
++ pResult->indirectSecondOffset = 0;
+
+ pResult->indirections = CORINFO_USEHELPER;
+
+@@ -2479,6 +2480,12 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
+ IfFailThrow(sigptr.GetData(&data));
+ pResult->offsets[2] = sizeof(TypeHandle) * data;
+
++ if (MethodTable::IsPerInstInfoRelative())
++ {
++ pResult->indirectFirstOffset = 1;
++ pResult->indirectSecondOffset = 1;
++ }
++
+ return;
+ }
+ }
+@@ -2524,6 +2531,12 @@ void ProcessDynamicDictionaryLookup(TransitionBlock * pTransitionBlock
+ // Next indirect through the dictionary appropriate to this instantiated type
+ pResult->offsets[1] = sizeof(TypeHandle*) * (pContextMT->GetNumDicts() - 1);
+
++ if (MethodTable::IsPerInstInfoRelative())
++ {
++ pResult->indirectFirstOffset = 1;
++ pResult->indirectSecondOffset = 1;
++ }
++
+ *pDictionaryIndexAndSlot |= dictionarySlot;
+ }
+ }
+--
+2.7.4
+
diff --git a/packaging/0025-Remove-relocations-for-MethodTable-s-vtable-1st-leve.patch b/packaging/0025-Remove-relocations-for-MethodTable-s-vtable-1st-leve.patch
new file mode 100644
index 0000000000..97f2ebbb84
--- /dev/null
+++ b/packaging/0025-Remove-relocations-for-MethodTable-s-vtable-1st-leve.patch
@@ -0,0 +1,901 @@
+From 3bf76a8bc479dd90c9f2f75d8941445c2a5ea2b2 Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Fri, 23 Jun 2017 15:48:48 +0300
+Subject: [PATCH 25/32] Remove relocations for MethodTable's
+ vtable-1st-level-indirection
+
+FIX: fix No.5, rebased
+---
+ .../superpmi/superpmi-shared/icorjitinfoimpl.h | 3 +-
+ src/ToolBox/superpmi/superpmi-shared/lwmlist.h | 2 +-
+ .../superpmi/superpmi-shared/methodcontext.cpp | 16 +++--
+ .../superpmi/superpmi-shared/methodcontext.h | 14 +++-
+ .../superpmi-shim-collector/icorjitinfo.cpp | 7 +-
+ .../superpmi/superpmi-shim-counter/icorjitinfo.cpp | 5 +-
+ .../superpmi/superpmi-shim-simple/icorjitinfo.cpp | 5 +-
+ src/ToolBox/superpmi/superpmi/icorjitinfo.cpp | 5 +-
+ src/debug/daccess/nidump.cpp | 9 +--
+ src/inc/corinfo.h | 3 +-
+ src/jit/ICorJitInfo_API_wrapper.hpp | 5 +-
+ src/jit/codegenlegacy.cpp | 76 ++++++++++++++++++----
+ src/jit/emitarm.cpp | 10 +++
+ src/jit/lower.cpp | 46 ++++++++++---
+ src/jit/lower.h | 6 ++
+ src/jit/morph.cpp | 20 +++++-
+ src/vm/array.cpp | 4 +-
+ src/vm/generics.cpp | 4 +-
+ src/vm/jitinterface.cpp | 6 +-
+ src/vm/jitinterface.h | 4 +-
+ src/vm/methodtable.cpp | 15 +++--
+ src/vm/methodtable.h | 19 ++++--
+ src/vm/methodtable.inl | 14 ++--
+ src/vm/methodtablebuilder.cpp | 12 ++--
+ src/zap/zapinfo.cpp | 7 +-
+ src/zap/zapinfo.h | 3 +-
+ 26 files changed, 238 insertions(+), 82 deletions(-)
+
+diff --git a/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h b/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
+index 19feffa..44b81aa 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
++++ b/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
+@@ -109,7 +109,8 @@ CORINFO_MODULE_HANDLE getMethodModule(CORINFO_METHOD_HANDLE method);
+ // vtable of it's owning class or interface.
+ void getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+- unsigned* offsetAfterIndirection /* OUT */
++ unsigned* offsetAfterIndirection,/* OUT */
++ unsigned* isRelative /* OUT */
+ );
+
+ // Find the virtual method in implementingClass that overrides virtualMethod.
+diff --git a/src/ToolBox/superpmi/superpmi-shared/lwmlist.h b/src/ToolBox/superpmi/superpmi-shared/lwmlist.h
+index 6e5f016..6b33681 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/lwmlist.h
++++ b/src/ToolBox/superpmi/superpmi-shared/lwmlist.h
+@@ -102,7 +102,7 @@ LWM(GetMethodInfo, DWORDLONG, Agnostic_GetMethodInfo)
+ LWM(GetMethodName, DLD, DD)
+ LWM(GetMethodSig, DLDL, Agnostic_CORINFO_SIG_INFO)
+ LWM(GetMethodSync, DWORDLONG, DLDL)
+-LWM(GetMethodVTableOffset, DWORDLONG, DD)
++LWM(GetMethodVTableOffset, DWORDLONG, DDD)
+ LWM(GetNewArrHelper, DWORDLONG, DWORD)
+ LWM(GetNewHelper, Agnostic_GetNewHelper, DWORD)
+ LWM(GetParentType, DWORDLONG, DWORDLONG)
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+index 4406b85..f4130e9 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+@@ -3382,26 +3382,29 @@ void MethodContext::repGetEHinfo(CORINFO_METHOD_HANDLE ftn, unsigned EHnumber, C
+
+ void MethodContext::recGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+- unsigned* offsetAfterIndirection)
++ unsigned* offsetAfterIndirection,
++ unsigned* isRelative)
+ {
+ if (GetMethodVTableOffset == nullptr)
+- GetMethodVTableOffset = new LightWeightMap<DWORDLONG, DD>();
++ GetMethodVTableOffset = new LightWeightMap<DWORDLONG, DDD>();
+
+- DD value;
++ DDD value;
+ value.A = (DWORD)*offsetOfIndirection;
+ value.B = (DWORD)*offsetAfterIndirection;
++ value.C = (DWORD)*isRelative;
+ GetMethodVTableOffset->Add((DWORDLONG)method, value);
+ DEBUG_REC(dmpGetMethodVTableOffset((DWORDLONG)method, value));
+ }
+-void MethodContext::dmpGetMethodVTableOffset(DWORDLONG key, DD value)
++void MethodContext::dmpGetMethodVTableOffset(DWORDLONG key, DDD value)
+ {
+ printf("GetMethodVTableOffset key ftn-%016llX, value offi-%u, offa-%u", key, value.A, value.B);
+ }
+ void MethodContext::repGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+- unsigned* offsetAfterIndirection)
++ unsigned* offsetAfterIndirection,
++ unsigned* isRelative)
+ {
+- DD value;
++ DDD value;
+
+ AssertCodeMsg(GetMethodVTableOffset != nullptr, EXCEPTIONCODE_MC, "Didn't find anything for %016llX",
+ (DWORDLONG)method);
+@@ -3411,6 +3414,7 @@ void MethodContext::repGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+
+ *offsetOfIndirection = (unsigned)value.A;
+ *offsetAfterIndirection = (unsigned)value.B;
++ *isRelative = (unsigned)value.C;
+ DEBUG_REP(dmpGetMethodVTableOffset((DWORDLONG)method, value));
+ }
+
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+index 53227e4..a8612b5 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+@@ -206,6 +206,12 @@ public:
+ DWORD A;
+ DWORD B;
+ };
++ struct DDD
++ {
++ DWORD A;
++ DWORD B;
++ DWORD C;
++ };
+ struct Agnostic_CanTailCall
+ {
+ DWORDLONG callerHnd;
+@@ -774,11 +780,13 @@ public:
+
+ void recGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+- unsigned* offsetAfterIndirection);
+- void dmpGetMethodVTableOffset(DWORDLONG key, DD value);
++ unsigned* offsetAfterIndirection,
++ unsigned* isRelative);
++ void dmpGetMethodVTableOffset(DWORDLONG key, DDD value);
+ void repGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+- unsigned* offsetAfterIndirection);
++ unsigned* offsetAfterIndirection,
++ unsigned* isRelative);
+
+ void recResolveVirtualMethod(CORINFO_METHOD_HANDLE virtMethod,
+ CORINFO_CLASS_HANDLE implClass,
+diff --git a/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
+index 4741cf1..1f81883 100644
+--- a/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
+@@ -214,12 +214,13 @@ CORINFO_MODULE_HANDLE interceptor_ICJI::getMethodModule(CORINFO_METHOD_HANDLE me
+ // vtable of it's owning class or interface.
+ void interceptor_ICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+- unsigned* offsetAfterIndirection /* OUT */
++ unsigned* offsetAfterIndirection,/* OUT */
++ unsigned* isRelative /* OUT */
+ )
+ {
+ mc->cr->AddCall("getMethodVTableOffset");
+- original_ICorJitInfo->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection);
+- mc->recGetMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection);
++ original_ICorJitInfo->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
++ mc->recGetMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
+ }
+
+ // Find the virtual method in implementingClass that overrides virtualMethod.
+diff --git a/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
+index 1d45229..5c2e784 100644
+--- a/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
+@@ -145,11 +145,12 @@ CORINFO_MODULE_HANDLE interceptor_ICJI::getMethodModule(CORINFO_METHOD_HANDLE me
+ // vtable of it's owning class or interface.
+ void interceptor_ICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+- unsigned* offsetAfterIndirection /* OUT */
++ unsigned* offsetAfterIndirection,/* OUT */
++ unsigned* isRelative /* OUT */
+ )
+ {
+ mcs->AddCall("getMethodVTableOffset");
+- original_ICorJitInfo->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection);
++ original_ICorJitInfo->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
+ }
+
+ // Find the virtual method in implementingClass that overrides virtualMethod.
+diff --git a/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
+index aca7536..df223f4 100644
+--- a/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
+@@ -134,10 +134,11 @@ CORINFO_MODULE_HANDLE interceptor_ICJI::getMethodModule(CORINFO_METHOD_HANDLE me
+ // vtable of it's owning class or interface.
+ void interceptor_ICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+- unsigned* offsetAfterIndirection /* OUT */
++ unsigned* offsetAfterIndirection,/* OUT */
++ unsigned* isRelative /* OUT */
+ )
+ {
+- original_ICorJitInfo->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection);
++ original_ICorJitInfo->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
+ }
+
+ // Find the virtual method in implementingClass that overrides virtualMethod.
+diff --git a/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
+index 59ad3c5..dc73a75 100644
+--- a/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
+@@ -165,11 +165,12 @@ CORINFO_MODULE_HANDLE MyICJI::getMethodModule(CORINFO_METHOD_HANDLE method)
+ // vtable of it's owning class or interface.
+ void MyICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+- unsigned* offsetAfterIndirection /* OUT */
++ unsigned* offsetAfterIndirection,/* OUT */
++ unsigned* isRelative /* OUT */
+ )
+ {
+ jitInstance->mc->cr->AddCall("getMethodVTableOffset");
+- jitInstance->mc->repGetMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection);
++ jitInstance->mc->repGetMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
+ }
+
+ // Find the virtual method in implementingClass that overrides virtualMethod.
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index d43e9f9..2732c9e 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -7230,9 +7230,9 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+ {
+ m_display->StartStructureWithOffset("Vtable",
+ mt->GetVtableOffset(),
+- mt->GetNumVtableIndirections() * sizeof(PTR_PCODE),
++ mt->GetNumVtableIndirections() * sizeof(MethodTable::VTableIndir_t),
+ DataPtrToDisplay(PTR_TO_TADDR(mt) + mt->GetVtableOffset()),
+- mt->GetNumVtableIndirections() * sizeof(PTR_PCODE));
++ mt->GetNumVtableIndirections() * sizeof(MethodTable::VTableIndir_t));
+
+
+ MethodTable::VtableIndirectionSlotIterator itIndirect = mt->IterateVtableIndirectionSlots();
+@@ -7251,7 +7251,8 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+ {
+ DisplayStartElement( "Slot", ALWAYS );
+ DisplayWriteElementInt( "Index", i, ALWAYS );
+- PTR_PCODE tgt = mt->GetVtableIndirections()[i];
++ TADDR base = dac_cast<TADDR>(&(mt->GetVtableIndirections()[i]));
++ PTR_PCODE tgt = MethodTable::VTableIndir_t::GetValueMaybeNullAtPtr(base);
+ DisplayWriteElementPointer( "Pointer",
+ DataPtrToDisplay(dac_cast<TADDR>(tgt)),
+ ALWAYS );
+@@ -7287,7 +7288,7 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+ else
+ {
+ CoverageRead( PTR_TO_TADDR(mt) + mt->GetVtableOffset(),
+- mt->GetNumVtableIndirections() * sizeof(PTR_PCODE) );
++ mt->GetNumVtableIndirections() * sizeof(MethodTable::VTableIndir_t) );
+
+ if (mt->HasNonVirtualSlotsArray())
+ {
+diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
+index 58fcdf4..2b1d3a9 100644
+--- a/src/inc/corinfo.h
++++ b/src/inc/corinfo.h
+@@ -2067,7 +2067,8 @@ public:
+ virtual void getMethodVTableOffset (
+ CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+- unsigned* offsetAfterIndirection /* OUT */
++ unsigned* offsetAfterIndirection, /* OUT */
++ unsigned* isRelative /* OUT */
+ ) = 0;
+
+ // Find the virtual method in implementingClass that overrides virtualMethod,
+diff --git a/src/jit/ICorJitInfo_API_wrapper.hpp b/src/jit/ICorJitInfo_API_wrapper.hpp
+index a3ad211..8e0d1df 100644
+--- a/src/jit/ICorJitInfo_API_wrapper.hpp
++++ b/src/jit/ICorJitInfo_API_wrapper.hpp
+@@ -122,10 +122,11 @@ CORINFO_MODULE_HANDLE WrapICorJitInfo::getMethodModule(
+ void WrapICorJitInfo::getMethodVTableOffset(
+ CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+- unsigned* offsetAfterIndirection /* OUT */)
++ unsigned* offsetAfterIndirection, /* OUT */
++ unsigned* isRelative /* OUT */)
+ {
+ API_ENTER(getMethodVTableOffset);
+- wrapHnd->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection);
++ wrapHnd->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
+ API_LEAVE(getMethodVTableOffset);
+ }
+
+diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
+index 178be54..a925c97 100644
+--- a/src/jit/codegenlegacy.cpp
++++ b/src/jit/codegenlegacy.cpp
+@@ -18886,35 +18886,68 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
+ // stub dispatching is off or this is not a virtual call (could be a tailcall)
+ {
+ regNumber vptrReg;
++ regNumber vptrReg1;
++ regMaskTP vptrMask1;
+ unsigned vtabOffsOfIndirection;
+ unsigned vtabOffsAfterIndirection;
++ unsigned isRelative;
+
+ noway_assert(callType == CT_USER_FUNC);
+
++ /* Get hold of the vtable offset (note: this might be expensive) */
++
++ compiler->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
++ &vtabOffsAfterIndirection, &isRelative);
++
+ vptrReg =
+ regSet.rsGrabReg(RBM_ALLINT); // Grab an available register to use for the CALL indirection
+ vptrMask = genRegMask(vptrReg);
+
++ if (isRelative)
++ {
++ vptrReg1 = regSet.rsGrabReg(RBM_ALLINT & ~vptrMask);
++ vptrMask1 = genRegMask(vptrReg1);
++ }
++
+ /* The register no longer holds a live pointer value */
+ gcInfo.gcMarkRegSetNpt(vptrMask);
+
++ if (isRelative)
++ {
++ gcInfo.gcMarkRegSetNpt(vptrMask1);
++ }
++
+ // MOV vptrReg, [REG_CALL_THIS + offs]
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, genGetThisArgReg(call),
+ VPTR_OFFS);
+ regTracker.rsTrackRegTrash(vptrReg);
+
+- noway_assert(vptrMask & ~call->gtCallRegUsedMask);
+-
+- /* Get hold of the vtable offset (note: this might be expensive) */
++ if (isRelative)
++ {
++ regTracker.rsTrackRegTrash(vptrReg1);
++ }
+
+- compiler->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
+- &vtabOffsAfterIndirection);
++ noway_assert(vptrMask & ~call->gtCallRegUsedMask);
+
+ /* Get the appropriate vtable chunk */
+
+ /* The register no longer holds a live pointer value */
+ gcInfo.gcMarkRegSetNpt(vptrMask);
+
++ /* Get the appropriate vtable chunk */
++
++ if (isRelative)
++ {
++#if defined(_TARGET_ARM_)
++ unsigned offset = vtabOffsOfIndirection + vtabOffsAfterIndirection;
++
++ // ADD vptrReg1, REG_CALL_IND_SCRATCH, vtabOffsOfIndirection + vtabOffsAfterIndirection
++ getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, vptrReg1, vptrReg, offset);
++#else
++ _ASSERTE(false);
++#endif
++ }
++
+ // MOV vptrReg, [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
+ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, vptrReg,
+ vtabOffsOfIndirection);
+@@ -18923,16 +18956,36 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
+
+ if (fTailCall)
+ {
+- /* Load the function address: "[vptrReg+vtabOffs] -> reg_intret" */
+-
+- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR, vptrReg,
+- vtabOffsAfterIndirection);
++ if (isRelative)
++ {
++#if defined(_TARGET_ARM_)
++ /* Load the function address: "[vptrReg1 + vptrReg] -> reg_intret" */
++ getEmitter()->emitIns_R_ARR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR, vptrReg1,
++ vptrReg, 0);
++#else
++ _ASSERTE(false);
++#endif
++ }
++ else
++ {
++ /* Load the function address: "[vptrReg+vtabOffs] -> reg_intret" */
++ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR, vptrReg,
++ vtabOffsAfterIndirection);
++ }
+ }
+ else
+ {
+ #if CPU_LOAD_STORE_ARCH
+- getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, vptrReg,
+- vtabOffsAfterIndirection);
++ if (isRelative)
++ {
++ getEmitter()->emitIns_R_ARR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, vptrReg1, vptrReg,
++ 0);
++ }
++ else
++ {
++ getEmitter()->emitIns_R_AR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, vptrReg, vptrReg,
++ vtabOffsAfterIndirection);
++ }
+
+ getEmitter()->emitIns_Call(emitter::EC_INDIR_R, call->gtCallMethHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+@@ -18940,6 +18993,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
+ gcInfo.gcRegByrefSetCur, ilOffset,
+ vptrReg); // ireg
+ #else
++ _ASSERTE(!isRelative);
+ getEmitter()->emitIns_Call(emitter::EC_FUNC_VIRTUAL, call->gtCallMethHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
+index 9ec8e07..e765af7 100644
+--- a/src/jit/emitarm.cpp
++++ b/src/jit/emitarm.cpp
+@@ -2446,6 +2446,16 @@ void emitter::emitIns_R_R_I(instruction ins,
+ fmt = IF_T2_M0;
+ sf = INS_FLAGS_NOT_SET;
+ }
++ else if (insDoesNotSetFlags(flags) && reg1 != REG_SP && reg1 != REG_PC)
++ {
++ // movw,movt reg1, imm
++ codeGen->instGen_Set_Reg_To_Imm(attr, reg1, imm);
++
++ // ins reg1, reg2
++ emitIns_R_R(ins, attr, reg1, reg2);
++
++ return;
++ }
+ else
+ {
+ assert(!"Instruction cannot be encoded");
+diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
+index 9cab5ee..d154f68 100644
+--- a/src/jit/lower.cpp
++++ b/src/jit/lower.cpp
+@@ -3401,6 +3401,13 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
+ // We'll introduce another use of this local so increase its ref count.
+ comp->lvaTable[lclNum].incRefCnts(comp->compCurBB->getBBWeight(comp), comp);
+
++ // Get hold of the vtable offset (note: this might be expensive)
++ unsigned vtabOffsOfIndirection;
++ unsigned vtabOffsAfterIndirection;
++ unsigned isRelative;
++ comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
++ &vtabOffsAfterIndirection, &isRelative);
++
+ // If the thisPtr is a local field, then construct a local field type node
+ GenTree* local;
+ if (thisPtr->isLclField())
+@@ -3416,19 +3423,40 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
+ // pointer to virtual table = [REG_CALL_THIS + offs]
+ GenTree* result = Ind(Offset(local, VPTR_OFFS));
+
+- // Get hold of the vtable offset (note: this might be expensive)
+- unsigned vtabOffsOfIndirection;
+- unsigned vtabOffsAfterIndirection;
+- comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
+- &vtabOffsAfterIndirection);
+-
+ // Get the appropriate vtable chunk
+- // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
+- result = Ind(Offset(result, vtabOffsOfIndirection));
++ if (isRelative)
++ {
++ unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp"));
++
++ comp->lvaTable[lclNumTmp].incRefCnts(comp->compCurBB->getBBWeight(comp), comp);
++ GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result);
++
++ LIR::Range range = LIR::SeqTree(comp, lclvNodeStore);
++ JITDUMP("results of lowering call interm:\n");
++ DISPRANGE(range);
++ BlockRange().InsertBefore(call, std::move(range));
++
++ GenTree* tmpTree = comp->gtNewLclvNode(lclNumTmp, result->TypeGet());
++ tmpTree = Offset(tmpTree, vtabOffsOfIndirection);
++
++ tmpTree = comp->gtNewOperNode(GT_IND, TYP_I_IMPL, tmpTree, false);
++ GenTree* offs = comp->gtNewIconNode(vtabOffsOfIndirection + vtabOffsAfterIndirection, TYP_INT);
++ result = comp->gtNewOperNode(GT_ADD, TYP_I_IMPL, comp->gtNewLclvNode(lclNumTmp, result->TypeGet()), offs);
++
++ result = Ind(OffsetByIndex(result, tmpTree));
++ }
++ else
++ {
++ // result = [REG_CALL_IND_SCRATCH + vtabOffsOfIndirection]
++ result = Ind(Offset(result, vtabOffsOfIndirection));
++ }
+
+ // Load the function address
+ // result = [reg+vtabOffs]
+- result = Ind(Offset(result, vtabOffsAfterIndirection));
++ if (!isRelative)
++ {
++ result = Ind(Offset(result, vtabOffsAfterIndirection));
++ }
+
+ return result;
+ }
+diff --git a/src/jit/lower.h b/src/jit/lower.h
+index 5a55d2d..92d9cfe 100644
+--- a/src/jit/lower.h
++++ b/src/jit/lower.h
+@@ -120,6 +120,12 @@ private:
+ return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, nullptr, 0, offset);
+ }
+
++ GenTree* OffsetByIndex(GenTree* base, GenTree* index)
++ {
++ var_types resultType = (base->TypeGet() == TYP_REF) ? TYP_BYREF : base->TypeGet();
++ return new (comp, GT_LEA) GenTreeAddrMode(resultType, base, index, 0, 0);
++ }
++
+ // returns true if the tree can use the read-modify-write memory instruction form
+ bool isRMWRegOper(GenTreePtr tree);
+
+diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
+index 3475889..c5d1ff2 100644
+--- a/src/jit/morph.cpp
++++ b/src/jit/morph.cpp
+@@ -7116,13 +7116,29 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
+
+ unsigned vtabOffsOfIndirection;
+ unsigned vtabOffsAfterIndirection;
+- info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection);
++ unsigned isRelative;
++ info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection,
++ &isRelative);
+
+ /* Get the appropriate vtable chunk */
+
+- add = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtbl, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
++ add = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtbl, gtNewIconNode(vtabOffsOfIndirection, TYP_I_IMPL));
++
++ GenTreePtr indOffTree;
++
++ if (isRelative)
++ {
++ indOffTree = impCloneExpr(add, &add, NO_CLASS_HANDLE, (unsigned)CHECK_SPILL_ALL,
++ nullptr DEBUGARG("virtual table call"));
++ }
++
+ vtbl = gtNewOperNode(GT_IND, TYP_I_IMPL, add);
+
++ if (isRelative)
++ {
++ vtbl = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtbl, indOffTree);
++ }
++
+ /* Now the appropriate vtable slot */
+
+ add = gtNewOperNode(GT_ADD, TYP_I_IMPL, vtbl, gtNewIconNode(vtabOffsAfterIndirection, TYP_I_IMPL));
+diff --git a/src/vm/array.cpp b/src/vm/array.cpp
+index 3f5a8aa..3a33aff 100644
+--- a/src/vm/array.cpp
++++ b/src/vm/array.cpp
+@@ -310,7 +310,7 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy
+ DWORD numNonVirtualSlots = numCtors + 3; // 3 for the proper rank Get, Set, Address
+
+ size_t cbMT = sizeof(MethodTable);
+- cbMT += MethodTable::GetNumVtableIndirections(numVirtuals) * sizeof(PTR_PCODE);
++ cbMT += MethodTable::GetNumVtableIndirections(numVirtuals) * sizeof(MethodTable::VTableIndir_t);
+
+ // GC info
+ size_t cbCGCDescData = 0;
+@@ -539,7 +539,7 @@ MethodTable* Module::CreateArrayMethodTable(TypeHandle elemTypeHnd, CorElementTy
+ if (canShareVtableChunks)
+ {
+ // Share the parent chunk
+- it.SetIndirectionSlot(pParentClass->GetVtableIndirections()[it.GetIndex()]);
++ it.SetIndirectionSlot(pParentClass->GetVtableIndirections()[it.GetIndex()].GetValueMaybeNull());
+ }
+ else
+ {
+diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
+index 650caef..b110184 100644
+--- a/src/vm/generics.cpp
++++ b/src/vm/generics.cpp
+@@ -255,7 +255,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
+
+ // Bytes are required for the vtable itself
+ S_SIZE_T safe_cbMT = S_SIZE_T( cbGC ) + S_SIZE_T( sizeof(MethodTable) );
+- safe_cbMT += MethodTable::GetNumVtableIndirections(cSlots) * sizeof(PTR_PCODE);
++ safe_cbMT += MethodTable::GetNumVtableIndirections(cSlots) * sizeof(MethodTable::VTableIndir_t);
+ if (safe_cbMT.IsOverflow())
+ {
+ ThrowHR(COR_E_OVERFLOW);
+@@ -440,7 +440,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
+ if (canShareVtableChunks)
+ {
+ // Share the canonical chunk
+- it.SetIndirectionSlot(pOldMT->GetVtableIndirections()[it.GetIndex()]);
++ it.SetIndirectionSlot(pOldMT->GetVtableIndirections()[it.GetIndex()].GetValueMaybeNull());
+ }
+ else
+ {
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index e0adf87..72f4131 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -8724,7 +8724,8 @@ CONTRACTL {
+ /*********************************************************************/
+ void CEEInfo::getMethodVTableOffset (CORINFO_METHOD_HANDLE methodHnd,
+ unsigned * pOffsetOfIndirection,
+- unsigned * pOffsetAfterIndirection)
++ unsigned * pOffsetAfterIndirection,
++ unsigned * isRelative)
+ {
+ CONTRACTL {
+ SO_TOLERANT;
+@@ -8745,8 +8746,9 @@ void CEEInfo::getMethodVTableOffset (CORINFO_METHOD_HANDLE methodHnd,
+ // better be in the vtable
+ _ASSERTE(method->GetSlot() < method->GetMethodTable()->GetNumVirtuals());
+
+- *pOffsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(method->GetSlot()) * sizeof(PTR_PCODE);
++ *pOffsetOfIndirection = MethodTable::GetVtableOffset() + MethodTable::GetIndexOfVtableIndirection(method->GetSlot()) * sizeof(MethodTable::VTableIndir_t);
+ *pOffsetAfterIndirection = MethodTable::GetIndexAfterVtableIndirection(method->GetSlot()) * sizeof(PCODE);
++ *isRelative = MethodTable::VTableIndir_t::isRelative ? 1 : 0;
+
+ EE_TO_JIT_TRANSITION_LEAF();
+ }
+diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
+index d67cfc5..cf1097c 100644
+--- a/src/vm/jitinterface.h
++++ b/src/vm/jitinterface.h
+@@ -727,8 +727,8 @@ public:
+ void getMethodVTableOffset (
+ CORINFO_METHOD_HANDLE methodHnd,
+ unsigned * pOffsetOfIndirection,
+- unsigned * pOffsetAfterIndirection
+- );
++ unsigned * pOffsetAfterIndirection,
++ unsigned * isRelative);
+
+ CORINFO_METHOD_HANDLE resolveVirtualMethod(
+ CORINFO_METHOD_HANDLE virtualMethod,
+diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
+index 4c1746e..75db911 100644
+--- a/src/vm/methodtable.cpp
++++ b/src/vm/methodtable.cpp
+@@ -4915,7 +4915,14 @@ void MethodTable::Fixup(DataImage *image)
+ VtableIndirectionSlotIterator it = IterateVtableIndirectionSlots();
+ while (it.Next())
+ {
+- image->FixupPointerField(this, it.GetOffsetFromMethodTable());
++ if (VTableIndir_t::isRelative)
++ {
++ image->FixupRelativePointerField(this, it.GetOffsetFromMethodTable());
++ }
++ else
++ {
++ image->FixupPointerField(this, it.GetOffsetFromMethodTable());
++ }
+ }
+ }
+
+@@ -4936,7 +4943,7 @@ void MethodTable::Fixup(DataImage *image)
+ {
+ // Virtual slots live in chunks pointed to by vtable indirections
+
+- slotBase = (PVOID) GetVtableIndirections()[GetIndexOfVtableIndirection(slotNumber)];
++ slotBase = (PVOID) GetVtableIndirections()[GetIndexOfVtableIndirection(slotNumber)].GetValueMaybeNull();
+ slotOffset = GetIndexAfterVtableIndirection(slotNumber) * sizeof(PCODE);
+ }
+ else if (HasSingleNonVirtualSlot())
+@@ -9414,13 +9421,13 @@ void MethodTable::SetSlot(UINT32 slotNumber, PCODE slotCode)
+
+ if (!IsCanonicalMethodTable())
+ {
+- if (GetVtableIndirections()[indirectionIndex] == GetCanonicalMethodTable()->GetVtableIndirections()[indirectionIndex])
++ if (GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() == GetCanonicalMethodTable()->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
+ fSharedVtableChunk = TRUE;
+ }
+
+ if (slotNumber < GetNumParentVirtuals())
+ {
+- if (GetVtableIndirections()[indirectionIndex] == GetParentMethodTable()->GetVtableIndirections()[indirectionIndex])
++ if (GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() == GetParentMethodTable()->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
+ fSharedVtableChunk = TRUE;
+ }
+
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index 81a9186..8c15d2e 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -1514,7 +1514,10 @@ public:
+
+ CONSISTENCY_CHECK(slotNum < GetNumVirtuals());
+ // Virtual slots live in chunks pointed to by vtable indirections
+- return *(GetVtableIndirections()[GetIndexOfVtableIndirection(slotNum)] + GetIndexAfterVtableIndirection(slotNum));
++
++ DWORD index = GetIndexOfVtableIndirection(slotNum);
++ TADDR base = dac_cast<TADDR>(&(GetVtableIndirections()[index]));
++ return *(VTableIndir_t::GetValueMaybeNullAtPtr(base) + GetIndexAfterVtableIndirection(slotNum));
+ }
+
+ PTR_PCODE GetSlotPtrRaw(UINT32 slotNum)
+@@ -1526,7 +1529,9 @@ public:
+ if (slotNum < GetNumVirtuals())
+ {
+ // Virtual slots live in chunks pointed to by vtable indirections
+- return GetVtableIndirections()[GetIndexOfVtableIndirection(slotNum)] + GetIndexAfterVtableIndirection(slotNum);
++ DWORD index = GetIndexOfVtableIndirection(slotNum);
++ TADDR base = dac_cast<TADDR>(&(GetVtableIndirections()[index]));
++ return VTableIndir_t::GetValueMaybeNullAtPtr(base) + GetIndexAfterVtableIndirection(slotNum);
+ }
+ else if (HasSingleNonVirtualSlot())
+ {
+@@ -1610,12 +1615,18 @@ public:
+ #define VTABLE_SLOTS_PER_CHUNK 8
+ #define VTABLE_SLOTS_PER_CHUNK_LOG2 3
+
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ typedef RelativePointer<PTR_PCODE> VTableIndir_t;
++#else
++ typedef PlainPointer<PTR_PCODE> VTableIndir_t;
++#endif
++
+ static DWORD GetIndexOfVtableIndirection(DWORD slotNum);
+ static DWORD GetStartSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals);
+ static DWORD GetEndSlotForVtableIndirection(UINT32 indirectionIndex, DWORD wNumVirtuals);
+ static UINT32 GetIndexAfterVtableIndirection(UINT32 slotNum);
+ static DWORD GetNumVtableIndirections(DWORD wNumVirtuals);
+- PTR_PTR_PCODE GetVtableIndirections();
++ DPTR(VTableIndir_t) GetVtableIndirections();
+ DWORD GetNumVtableIndirections();
+
+ class VtableIndirectionSlotIterator
+@@ -1623,7 +1634,7 @@ public:
+ friend class MethodTable;
+
+ private:
+- PTR_PTR_PCODE m_pSlot;
++ DPTR(VTableIndir_t) m_pSlot;
+ DWORD m_i;
+ DWORD m_count;
+ PTR_MethodTable m_pMT;
+diff --git a/src/vm/methodtable.inl b/src/vm/methodtable.inl
+index b69513d..0d0acda 100644
+--- a/src/vm/methodtable.inl
++++ b/src/vm/methodtable.inl
+@@ -887,10 +887,10 @@ inline DWORD MethodTable::GetNumVtableIndirections(DWORD wNumVirtuals)
+ }
+
+ //==========================================================================================
+-inline PTR_PTR_PCODE MethodTable::GetVtableIndirections()
++inline DPTR(MethodTable::VTableIndir_t) MethodTable::GetVtableIndirections()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return dac_cast<PTR_PTR_PCODE>(dac_cast<TADDR>(this) + sizeof(MethodTable));
++ return dac_cast<DPTR(VTableIndir_t)>(dac_cast<TADDR>(this) + sizeof(MethodTable));
+ }
+
+ //==========================================================================================
+@@ -952,7 +952,7 @@ inline DWORD MethodTable::VtableIndirectionSlotIterator::GetOffsetFromMethodTabl
+ WRAPPER_NO_CONTRACT;
+ PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
+
+- return GetVtableOffset() + sizeof(PTR_PCODE) * m_i;
++ return GetVtableOffset() + sizeof(VTableIndir_t) * m_i;
+ }
+
+ //==========================================================================================
+@@ -961,7 +961,7 @@ inline PTR_PCODE MethodTable::VtableIndirectionSlotIterator::GetIndirectionSlot(
+ LIMITED_METHOD_DAC_CONTRACT;
+ PRECONDITION(m_i != (DWORD) -1 && m_i < m_count);
+
+- return *m_pSlot;
++ return m_pSlot->GetValueMaybeNull(dac_cast<TADDR>(m_pSlot));
+ }
+
+ //==========================================================================================
+@@ -969,7 +969,7 @@ inline PTR_PCODE MethodTable::VtableIndirectionSlotIterator::GetIndirectionSlot(
+ inline void MethodTable::VtableIndirectionSlotIterator::SetIndirectionSlot(PTR_PCODE pChunk)
+ {
+ LIMITED_METHOD_CONTRACT;
+- *m_pSlot = pChunk;
++ m_pSlot->SetValueMaybeNull(pChunk);
+ }
+ #endif
+
+@@ -1355,7 +1355,7 @@ FORCEINLINE TADDR MethodTable::GetMultipurposeSlotPtr(WFLAGS2_ENUM flag, const B
+ DWORD offset = offsets[GetFlag((WFLAGS2_ENUM)(flag - 1))];
+
+ if (offset >= sizeof(MethodTable)) {
+- offset += GetNumVtableIndirections() * sizeof(PTR_PCODE);
++ offset += GetNumVtableIndirections() * sizeof(VTableIndir_t);
+ }
+
+ return dac_cast<TADDR>(this) + offset;
+@@ -1370,7 +1370,7 @@ FORCEINLINE DWORD MethodTable::GetOffsetOfOptionalMember(OptionalMemberId id)
+
+ DWORD offset = c_OptionalMembersStartOffsets[GetFlag(enum_flag_MultipurposeSlotsMask)];
+
+- offset += GetNumVtableIndirections() * sizeof(PTR_PCODE);
++ offset += GetNumVtableIndirections() * sizeof(VTableIndir_t);
+
+ #undef METHODTABLE_OPTIONAL_MEMBER
+ #define METHODTABLE_OPTIONAL_MEMBER(NAME, TYPE, GETTER) \
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index 792a19c..970166d 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -9111,7 +9111,7 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
+ //
+ // Non-canonical method tables either share everything or nothing so it is sufficient to check
+ // just the first indirection to detect sharing.
+- if (pMT->GetVtableIndirections()[0] != pCanonMT->GetVtableIndirections()[0])
++ if (pMT->GetVtableIndirections()[0].GetValueMaybeNull() != pCanonMT->GetVtableIndirections()[0].GetValueMaybeNull())
+ {
+ for (DWORD i = 0; i < nParentVirtuals; i++)
+ {
+@@ -9138,7 +9138,7 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
+ // We need to re-inherit this slot from the exact parent.
+
+ DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(i);
+- if (pMT->GetVtableIndirections()[indirectionIndex] == pApproxParentMT->GetVtableIndirections()[indirectionIndex])
++ if (pMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() == pApproxParentMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
+ {
+ // The slot lives in a chunk shared from the approximate parent MT
+ // If so, we need to change to share the chunk from the exact parent MT
+@@ -9149,7 +9149,7 @@ void MethodTableBuilder::CopyExactParentSlots(MethodTable *pMT, MethodTable *pAp
+ _ASSERTE(MethodTable::CanShareVtableChunksFrom(pParentMT, pMT->GetLoaderModule()));
+ #endif
+
+- pMT->GetVtableIndirections()[indirectionIndex] = pParentMT->GetVtableIndirections()[indirectionIndex];
++ pMT->GetVtableIndirections()[indirectionIndex].SetValueMaybeNull(pParentMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull());
+
+ i = MethodTable::GetEndSlotForVtableIndirection(indirectionIndex, nParentVirtuals) - 1;
+ continue;
+@@ -10006,7 +10006,7 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
+ S_SIZE_T cbTotalSize = S_SIZE_T(dwGCSize) + S_SIZE_T(sizeof(MethodTable));
+
+ // vtable
+- cbTotalSize += MethodTable::GetNumVtableIndirections(dwVirtuals) * sizeof(PTR_PCODE);
++ cbTotalSize += MethodTable::GetNumVtableIndirections(dwVirtuals) * sizeof(MethodTable::VTableIndir_t);
+
+
+ DWORD dwMultipurposeSlotsMask = 0;
+@@ -10155,7 +10155,7 @@ MethodTable * MethodTableBuilder::AllocateNewMT(Module *pLoaderModule,
+ {
+ // Share the parent chunk
+ _ASSERTE(it.GetEndSlot() <= pMTParent->GetNumVirtuals());
+- it.SetIndirectionSlot(pMTParent->GetVtableIndirections()[it.GetIndex()]);
++ it.SetIndirectionSlot(pMTParent->GetVtableIndirections()[it.GetIndex()].GetValueMaybeNull());
+ }
+ else
+ {
+@@ -10711,7 +10711,7 @@ MethodTableBuilder::SetupMethodTable2(
+ // with code:MethodDesc::SetStableEntryPointInterlocked.
+ //
+ DWORD indirectionIndex = MethodTable::GetIndexOfVtableIndirection(iCurSlot);
+- if (GetParentMethodTable()->GetVtableIndirections()[indirectionIndex] != pMT->GetVtableIndirections()[indirectionIndex])
++ if (GetParentMethodTable()->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull() != pMT->GetVtableIndirections()[indirectionIndex].GetValueMaybeNull())
+ pMT->SetSlot(iCurSlot, pMD->GetMethodEntryPoint());
+ }
+ else
+diff --git a/src/zap/zapinfo.cpp b/src/zap/zapinfo.cpp
+index e94dea6..507cc25 100644
+--- a/src/zap/zapinfo.cpp
++++ b/src/zap/zapinfo.cpp
+@@ -3708,10 +3708,11 @@ CORINFO_MODULE_HANDLE ZapInfo::getMethodModule(CORINFO_METHOD_HANDLE method)
+ }
+
+ void ZapInfo::getMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+- unsigned * pOffsetOfIndirection,
+- unsigned * pOffsetAfterIndirection)
++ unsigned * pOffsetOfIndirection,
++ unsigned * pOffsetAfterIndirection,
++ unsigned * isRelative)
+ {
+- m_pEEJitInfo->getMethodVTableOffset(method, pOffsetOfIndirection, pOffsetAfterIndirection);
++ m_pEEJitInfo->getMethodVTableOffset(method, pOffsetOfIndirection, pOffsetAfterIndirection, isRelative);
+ }
+
+ CORINFO_METHOD_HANDLE ZapInfo::resolveVirtualMethod(
+diff --git a/src/zap/zapinfo.h b/src/zap/zapinfo.h
+index 6e83657..65c1ddd 100644
+--- a/src/zap/zapinfo.h
++++ b/src/zap/zapinfo.h
+@@ -663,7 +663,8 @@ public:
+
+ void getMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned * pOffsetOfIndirection,
+- unsigned * pOffsetAfterIndirection);
++ unsigned * pOffsetAfterIndirection,
++ unsigned * isRelative);
+
+ CORINFO_METHOD_HANDLE resolveVirtualMethod(
+ CORINFO_METHOD_HANDLE virtualMethod,
+--
+2.7.4
+
diff --git a/packaging/0026-Move-ITEM_DICTIONARY-and-ITEM_VTABLE_CHUNK-to-separa.patch b/packaging/0026-Move-ITEM_DICTIONARY-and-ITEM_VTABLE_CHUNK-to-separa.patch
new file mode 100644
index 0000000000..f89e386445
--- /dev/null
+++ b/packaging/0026-Move-ITEM_DICTIONARY-and-ITEM_VTABLE_CHUNK-to-separa.patch
@@ -0,0 +1,64 @@
+From ca8da76187c4470b951dc9a1971b1918e99242bf Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Fri, 23 Jun 2017 15:58:45 +0300
+Subject: [PATCH 26/32] Move ITEM_DICTIONARY and ITEM_VTABLE_CHUNK to separate
+ subsection of SECTION_Readonly
+
+---
+ src/inc/corcompile.h | 1 +
+ src/vm/dataimage.cpp | 6 ++++--
+ src/zap/zapimage.cpp | 1 +
+ 3 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/src/inc/corcompile.h b/src/inc/corcompile.h
+index 68eefc1..17fdfcb 100644
+--- a/src/inc/corcompile.h
++++ b/src/inc/corcompile.h
+@@ -1329,6 +1329,7 @@ class ICorCompilePreloader
+ CORCOMPILE_SECTION(READONLY_HOT) \
+ CORCOMPILE_SECTION(READONLY_WARM) \
+ CORCOMPILE_SECTION(READONLY_COLD) \
++ CORCOMPILE_SECTION(READONLY_VCHUNKS_AND_DICTIONARY) \
+ CORCOMPILE_SECTION(CLASS_COLD) \
+ CORCOMPILE_SECTION(CROSS_DOMAIN_INFO) \
+ CORCOMPILE_SECTION(METHOD_PRECODE_COLD) \
+diff --git a/src/vm/dataimage.cpp b/src/vm/dataimage.cpp
+index fc584d7..4e276fe 100644
+--- a/src/vm/dataimage.cpp
++++ b/src/vm/dataimage.cpp
+@@ -738,9 +738,7 @@ FORCEINLINE static CorCompileSection GetSectionForNodeType(ZapNodeType type)
+
+ // SECTION_READONLY_WARM
+ case NodeTypeForItemKind(DataImage::ITEM_METHOD_TABLE):
+- case NodeTypeForItemKind(DataImage::ITEM_VTABLE_CHUNK):
+ case NodeTypeForItemKind(DataImage::ITEM_INTERFACE_MAP):
+- case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY):
+ case NodeTypeForItemKind(DataImage::ITEM_DISPATCH_MAP):
+ case NodeTypeForItemKind(DataImage::ITEM_GENERICS_STATIC_FIELDDESCS):
+ case NodeTypeForItemKind(DataImage::ITEM_GC_STATIC_HANDLES_COLD):
+@@ -750,6 +748,10 @@ FORCEINLINE static CorCompileSection GetSectionForNodeType(ZapNodeType type)
+ case NodeTypeForItemKind(DataImage::ITEM_STORED_METHOD_SIG_READONLY_WARM):
+ return CORCOMPILE_SECTION_READONLY_WARM;
+
++ case NodeTypeForItemKind(DataImage::ITEM_DICTIONARY):
++ case NodeTypeForItemKind(DataImage::ITEM_VTABLE_CHUNK):
++ return CORCOMPILE_SECTION_READONLY_VCHUNKS_AND_DICTIONARY;
++
+ // SECTION_CLASS_COLD
+ case NodeTypeForItemKind(DataImage::ITEM_PARAM_TYPEDESC):
+ case NodeTypeForItemKind(DataImage::ITEM_ARRAY_TYPEDESC):
+diff --git a/src/zap/zapimage.cpp b/src/zap/zapimage.cpp
+index 61cf099..4c26946 100644
+--- a/src/zap/zapimage.cpp
++++ b/src/zap/zapimage.cpp
+@@ -572,6 +572,7 @@ void ZapImage::AllocateVirtualSections()
+ #endif // defined(WIN64EXCEPTIONS)
+
+ m_pPreloadSections[CORCOMPILE_SECTION_READONLY_WARM] = NewVirtualSection(pTextSection, IBCProfiledSection | WarmRange | ReadonlySection, sizeof(TADDR));
++ m_pPreloadSections[CORCOMPILE_SECTION_READONLY_VCHUNKS_AND_DICTIONARY] = NewVirtualSection(pTextSection, IBCProfiledSection | WarmRange | ReadonlySection, sizeof(TADDR));
+
+ //
+ // GC Info for methods which were not touched in profiling
+--
+2.7.4
+
diff --git a/packaging/0027-Update-GUID.patch b/packaging/0027-Update-GUID.patch
new file mode 100644
index 0000000000..c4489d5846
--- /dev/null
+++ b/packaging/0027-Update-GUID.patch
@@ -0,0 +1,33 @@
+From c7812b407b5d9370db61c0e54d414e58f17dc25d Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Fri, 23 Jun 2017 17:34:11 +0300
+Subject: [PATCH 27/32] Update GUID
+
+---
+ src/inc/corinfo.h | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
+index 2b1d3a9..1489a74 100644
+--- a/src/inc/corinfo.h
++++ b/src/inc/corinfo.h
+@@ -213,11 +213,11 @@ TODO: Talk about initializing strutures before use
+ #define SELECTANY extern __declspec(selectany)
+ #endif
+
+-SELECTANY const GUID JITEEVersionIdentifier = { /* 28eb875f-b6a9-4a04-9ba7-69ba59deed46 */
+- 0x28eb875f,
+- 0xb6a9,
+- 0x4a04,
+- { 0x9b, 0xa7, 0x69, 0xba, 0x59, 0xde, 0xed, 0x46 }
++SELECTANY const GUID JITEEVersionIdentifier = { /* 5a1cfc89-a84a-4642-b01d-ead88e60c1ee */
++ 0x5a1cfc89,
++ 0xa84a,
++ 0x4642,
++ { 0xb0, 0x1d, 0xea, 0xd8, 0x8e, 0x60, 0xc1, 0xee }
+ };
+
+ //////////////////////////////////////////////////////////////////////////////////////////////////////////
+--
+2.7.4
+
diff --git a/packaging/0028-Review-fixes.patch b/packaging/0028-Review-fixes.patch
new file mode 100644
index 0000000000..b524b8af4e
--- /dev/null
+++ b/packaging/0028-Review-fixes.patch
@@ -0,0 +1,357 @@
+From 5718b54e2b069a20e0a9c5f2d15ff2a6d1ea1e72 Mon Sep 17 00:00:00 2001
+From: Gleb Balykov <g.balykov@samsung.com>
+Date: Mon, 7 Aug 2017 13:33:58 +0300
+Subject: [PATCH 28/32] Review fixes
+
+FIX: fix No.6, rebased
+---
+ src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h | 2 +-
+ src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp | 10 +++++-----
+ src/ToolBox/superpmi/superpmi-shared/methodcontext.h | 6 +++---
+ .../superpmi/superpmi-shim-collector/icorjitinfo.cpp | 2 +-
+ .../superpmi/superpmi-shim-counter/icorjitinfo.cpp | 2 +-
+ src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp | 2 +-
+ src/ToolBox/superpmi/superpmi/icorjitinfo.cpp | 2 +-
+ src/inc/corinfo.h | 2 +-
+ src/jit/ICorJitInfo_API_wrapper.hpp | 2 +-
+ src/jit/codegenlegacy.cpp | 8 ++++----
+ src/jit/emitarm.cpp | 6 +++---
+ src/jit/lower.cpp | 15 +++++++++++++--
+ src/jit/morph.cpp | 2 +-
+ src/vm/jitinterface.cpp | 2 +-
+ src/vm/jitinterface.h | 2 +-
+ src/zap/zapinfo.cpp | 2 +-
+ src/zap/zapinfo.h | 2 +-
+ 17 files changed, 40 insertions(+), 29 deletions(-)
+
+diff --git a/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h b/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
+index 44b81aa..e38a66b 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
++++ b/src/ToolBox/superpmi/superpmi-shared/icorjitinfoimpl.h
+@@ -110,7 +110,7 @@ CORINFO_MODULE_HANDLE getMethodModule(CORINFO_METHOD_HANDLE method);
+ void getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+ unsigned* offsetAfterIndirection,/* OUT */
+- unsigned* isRelative /* OUT */
++ bool* isRelative /* OUT */
+ );
+
+ // Find the virtual method in implementingClass that overrides virtualMethod.
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+index f4130e9..bac6004 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.cpp
+@@ -3383,7 +3383,7 @@ void MethodContext::repGetEHinfo(CORINFO_METHOD_HANDLE ftn, unsigned EHnumber, C
+ void MethodContext::recGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+ unsigned* offsetAfterIndirection,
+- unsigned* isRelative)
++ bool* isRelative)
+ {
+ if (GetMethodVTableOffset == nullptr)
+ GetMethodVTableOffset = new LightWeightMap<DWORDLONG, DDD>();
+@@ -3391,18 +3391,18 @@ void MethodContext::recGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ DDD value;
+ value.A = (DWORD)*offsetOfIndirection;
+ value.B = (DWORD)*offsetAfterIndirection;
+- value.C = (DWORD)*isRelative;
++ value.C = *isRelative;
+ GetMethodVTableOffset->Add((DWORDLONG)method, value);
+ DEBUG_REC(dmpGetMethodVTableOffset((DWORDLONG)method, value));
+ }
+ void MethodContext::dmpGetMethodVTableOffset(DWORDLONG key, DDD value)
+ {
+- printf("GetMethodVTableOffset key ftn-%016llX, value offi-%u, offa-%u", key, value.A, value.B);
++ printf("GetMethodVTableOffset key ftn-%016llX, value offi-%u, offa-%u. offr-%d", key, value.A, value.B, value.C);
+ }
+ void MethodContext::repGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+ unsigned* offsetAfterIndirection,
+- unsigned* isRelative)
++ bool* isRelative)
+ {
+ DDD value;
+
+@@ -3414,7 +3414,7 @@ void MethodContext::repGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+
+ *offsetOfIndirection = (unsigned)value.A;
+ *offsetAfterIndirection = (unsigned)value.B;
+- *isRelative = (unsigned)value.C;
++ *isRelative = value.C;
+ DEBUG_REP(dmpGetMethodVTableOffset((DWORDLONG)method, value));
+ }
+
+diff --git a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+index a8612b5..70e04ea 100644
+--- a/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
++++ b/src/ToolBox/superpmi/superpmi-shared/methodcontext.h
+@@ -210,7 +210,7 @@ public:
+ {
+ DWORD A;
+ DWORD B;
+- DWORD C;
++ bool C;
+ };
+ struct Agnostic_CanTailCall
+ {
+@@ -781,12 +781,12 @@ public:
+ void recGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+ unsigned* offsetAfterIndirection,
+- unsigned* isRelative);
++ bool* isRelative);
+ void dmpGetMethodVTableOffset(DWORDLONG key, DDD value);
+ void repGetMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned* offsetOfIndirection,
+ unsigned* offsetAfterIndirection,
+- unsigned* isRelative);
++ bool* isRelative);
+
+ void recResolveVirtualMethod(CORINFO_METHOD_HANDLE virtMethod,
+ CORINFO_CLASS_HANDLE implClass,
+diff --git a/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
+index 1f81883..a02c88d 100644
+--- a/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi-shim-collector/icorjitinfo.cpp
+@@ -215,7 +215,7 @@ CORINFO_MODULE_HANDLE interceptor_ICJI::getMethodModule(CORINFO_METHOD_HANDLE me
+ void interceptor_ICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+ unsigned* offsetAfterIndirection,/* OUT */
+- unsigned* isRelative /* OUT */
++ bool* isRelative /* OUT */
+ )
+ {
+ mc->cr->AddCall("getMethodVTableOffset");
+diff --git a/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
+index 5c2e784..5f02638 100644
+--- a/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi-shim-counter/icorjitinfo.cpp
+@@ -146,7 +146,7 @@ CORINFO_MODULE_HANDLE interceptor_ICJI::getMethodModule(CORINFO_METHOD_HANDLE me
+ void interceptor_ICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+ unsigned* offsetAfterIndirection,/* OUT */
+- unsigned* isRelative /* OUT */
++ bool* isRelative /* OUT */
+ )
+ {
+ mcs->AddCall("getMethodVTableOffset");
+diff --git a/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
+index df223f4..5f89a87 100644
+--- a/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi-shim-simple/icorjitinfo.cpp
+@@ -135,7 +135,7 @@ CORINFO_MODULE_HANDLE interceptor_ICJI::getMethodModule(CORINFO_METHOD_HANDLE me
+ void interceptor_ICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+ unsigned* offsetAfterIndirection,/* OUT */
+- unsigned* isRelative /* OUT */
++ bool* isRelative /* OUT */
+ )
+ {
+ original_ICorJitInfo->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
+diff --git a/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp b/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
+index dc73a75..dd974d1 100644
+--- a/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
++++ b/src/ToolBox/superpmi/superpmi/icorjitinfo.cpp
+@@ -166,7 +166,7 @@ CORINFO_MODULE_HANDLE MyICJI::getMethodModule(CORINFO_METHOD_HANDLE method)
+ void MyICJI::getMethodVTableOffset(CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+ unsigned* offsetAfterIndirection,/* OUT */
+- unsigned* isRelative /* OUT */
++ bool* isRelative /* OUT */
+ )
+ {
+ jitInstance->mc->cr->AddCall("getMethodVTableOffset");
+diff --git a/src/inc/corinfo.h b/src/inc/corinfo.h
+index 1489a74..63ade7f 100644
+--- a/src/inc/corinfo.h
++++ b/src/inc/corinfo.h
+@@ -2068,7 +2068,7 @@ public:
+ CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+ unsigned* offsetAfterIndirection, /* OUT */
+- unsigned* isRelative /* OUT */
++ bool* isRelative /* OUT */
+ ) = 0;
+
+ // Find the virtual method in implementingClass that overrides virtualMethod,
+diff --git a/src/jit/ICorJitInfo_API_wrapper.hpp b/src/jit/ICorJitInfo_API_wrapper.hpp
+index 8e0d1df..0d3bf5e 100644
+--- a/src/jit/ICorJitInfo_API_wrapper.hpp
++++ b/src/jit/ICorJitInfo_API_wrapper.hpp
+@@ -123,7 +123,7 @@ void WrapICorJitInfo::getMethodVTableOffset(
+ CORINFO_METHOD_HANDLE method, /* IN */
+ unsigned* offsetOfIndirection, /* OUT */
+ unsigned* offsetAfterIndirection, /* OUT */
+- unsigned* isRelative /* OUT */)
++ bool* isRelative /* OUT */)
+ {
+ API_ENTER(getMethodVTableOffset);
+ wrapHnd->getMethodVTableOffset(method, offsetOfIndirection, offsetAfterIndirection, isRelative);
+diff --git a/src/jit/codegenlegacy.cpp b/src/jit/codegenlegacy.cpp
+index a925c97..53c8f8d 100644
+--- a/src/jit/codegenlegacy.cpp
++++ b/src/jit/codegenlegacy.cpp
+@@ -18890,7 +18890,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
+ regMaskTP vptrMask1;
+ unsigned vtabOffsOfIndirection;
+ unsigned vtabOffsAfterIndirection;
+- unsigned isRelative;
++ bool isRelative;
+
+ noway_assert(callType == CT_USER_FUNC);
+
+@@ -18944,7 +18944,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
+ // ADD vptrReg1, REG_CALL_IND_SCRATCH, vtabOffsOfIndirection + vtabOffsAfterIndirection
+ getEmitter()->emitIns_R_R_I(INS_add, EA_PTRSIZE, vptrReg1, vptrReg, offset);
+ #else
+- _ASSERTE(false);
++ unreached();
+ #endif
+ }
+
+@@ -18963,7 +18963,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
+ getEmitter()->emitIns_R_ARR(ins_Load(TYP_I_IMPL), EA_PTRSIZE, REG_TAILCALL_ADDR, vptrReg1,
+ vptrReg, 0);
+ #else
+- _ASSERTE(false);
++ unreached();
+ #endif
+ }
+ else
+@@ -18993,7 +18993,7 @@ regMaskTP CodeGen::genCodeForCall(GenTreeCall* call, bool valUsed)
+ gcInfo.gcRegByrefSetCur, ilOffset,
+ vptrReg); // ireg
+ #else
+- _ASSERTE(!isRelative);
++ assert(!isRelative);
+ getEmitter()->emitIns_Call(emitter::EC_FUNC_VIRTUAL, call->gtCallMethHnd,
+ INDEBUG_LDISASM_COMMA(sigInfo) NULL, // addr
+ args, retSize, gcInfo.gcVarPtrSetCur, gcInfo.gcRegGCrefSetCur,
+diff --git a/src/jit/emitarm.cpp b/src/jit/emitarm.cpp
+index e765af7..ba89c9b 100644
+--- a/src/jit/emitarm.cpp
++++ b/src/jit/emitarm.cpp
+@@ -2446,13 +2446,13 @@ void emitter::emitIns_R_R_I(instruction ins,
+ fmt = IF_T2_M0;
+ sf = INS_FLAGS_NOT_SET;
+ }
+- else if (insDoesNotSetFlags(flags) && reg1 != REG_SP && reg1 != REG_PC)
++ else if (insDoesNotSetFlags(flags) && (reg1 != REG_SP) && (reg1 != REG_PC))
+ {
+ // movw,movt reg1, imm
+- codeGen->instGen_Set_Reg_To_Imm(attr, reg1, imm);
++ codeGen->instGen_Set_Reg_To_Imm(attr, reg1, (ins == INS_sub ? -1 : 1) * imm);
+
+ // ins reg1, reg2
+- emitIns_R_R(ins, attr, reg1, reg2);
++ emitIns_R_R(INS_add, attr, reg1, reg2);
+
+ return;
+ }
+diff --git a/src/jit/lower.cpp b/src/jit/lower.cpp
+index d154f68..c06dcb6 100644
+--- a/src/jit/lower.cpp
++++ b/src/jit/lower.cpp
+@@ -3404,7 +3404,7 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
+ // Get hold of the vtable offset (note: this might be expensive)
+ unsigned vtabOffsOfIndirection;
+ unsigned vtabOffsAfterIndirection;
+- unsigned isRelative;
++ bool isRelative;
+ comp->info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection,
+ &vtabOffsAfterIndirection, &isRelative);
+
+@@ -3426,13 +3426,24 @@ GenTree* Lowering::LowerVirtualVtableCall(GenTreeCall* call)
+ // Get the appropriate vtable chunk
+ if (isRelative)
+ {
++ // MethodTable offset is a relative pointer.
++ //
++ // Additional temporary variable is used to store virtual table pointer.
++ // Address of method is obtained by the next computations:
++ //
++ // Save relative offset to tmp (vtab is virtual table pointer, vtabOffsOfIndirection is offset of
++ // vtable-1st-level-indirection):
++ // tmp = [vtab + vtabOffsOfIndirection]
++ //
++ // Save address of method to result (vtabOffsAfterIndirection is offset of vtable-2nd-level-indirection):
++ // result = [vtab + vtabOffsOfIndirection + vtabOffsAfterIndirection + tmp]
+ unsigned lclNumTmp = comp->lvaGrabTemp(true DEBUGARG("lclNumTmp"));
+
+ comp->lvaTable[lclNumTmp].incRefCnts(comp->compCurBB->getBBWeight(comp), comp);
+ GenTree* lclvNodeStore = comp->gtNewTempAssign(lclNumTmp, result);
+
+ LIR::Range range = LIR::SeqTree(comp, lclvNodeStore);
+- JITDUMP("results of lowering call interm:\n");
++ JITDUMP("result of obtaining pointer to virtual table:\n");
+ DISPRANGE(range);
+ BlockRange().InsertBefore(call, std::move(range));
+
+diff --git a/src/jit/morph.cpp b/src/jit/morph.cpp
+index c5d1ff2..79b3fef 100644
+--- a/src/jit/morph.cpp
++++ b/src/jit/morph.cpp
+@@ -7116,7 +7116,7 @@ void Compiler::fgMorphTailCall(GenTreeCall* call)
+
+ unsigned vtabOffsOfIndirection;
+ unsigned vtabOffsAfterIndirection;
+- unsigned isRelative;
++ bool isRelative;
+ info.compCompHnd->getMethodVTableOffset(call->gtCallMethHnd, &vtabOffsOfIndirection, &vtabOffsAfterIndirection,
+ &isRelative);
+
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index 72f4131..9cefd10 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -8725,7 +8725,7 @@ CONTRACTL {
+ void CEEInfo::getMethodVTableOffset (CORINFO_METHOD_HANDLE methodHnd,
+ unsigned * pOffsetOfIndirection,
+ unsigned * pOffsetAfterIndirection,
+- unsigned * isRelative)
++ bool * isRelative)
+ {
+ CONTRACTL {
+ SO_TOLERANT;
+diff --git a/src/vm/jitinterface.h b/src/vm/jitinterface.h
+index cf1097c..a906a0f 100644
+--- a/src/vm/jitinterface.h
++++ b/src/vm/jitinterface.h
+@@ -728,7 +728,7 @@ public:
+ CORINFO_METHOD_HANDLE methodHnd,
+ unsigned * pOffsetOfIndirection,
+ unsigned * pOffsetAfterIndirection,
+- unsigned * isRelative);
++ bool * isRelative);
+
+ CORINFO_METHOD_HANDLE resolveVirtualMethod(
+ CORINFO_METHOD_HANDLE virtualMethod,
+diff --git a/src/zap/zapinfo.cpp b/src/zap/zapinfo.cpp
+index 507cc25..19247dd 100644
+--- a/src/zap/zapinfo.cpp
++++ b/src/zap/zapinfo.cpp
+@@ -3710,7 +3710,7 @@ CORINFO_MODULE_HANDLE ZapInfo::getMethodModule(CORINFO_METHOD_HANDLE method)
+ void ZapInfo::getMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned * pOffsetOfIndirection,
+ unsigned * pOffsetAfterIndirection,
+- unsigned * isRelative)
++ bool * isRelative)
+ {
+ m_pEEJitInfo->getMethodVTableOffset(method, pOffsetOfIndirection, pOffsetAfterIndirection, isRelative);
+ }
+diff --git a/src/zap/zapinfo.h b/src/zap/zapinfo.h
+index 65c1ddd..afa50c7 100644
+--- a/src/zap/zapinfo.h
++++ b/src/zap/zapinfo.h
+@@ -664,7 +664,7 @@ public:
+ void getMethodVTableOffset(CORINFO_METHOD_HANDLE method,
+ unsigned * pOffsetOfIndirection,
+ unsigned * pOffsetAfterIndirection,
+- unsigned * isRelative);
++ bool * isRelative);
+
+ CORINFO_METHOD_HANDLE resolveVirtualMethod(
+ CORINFO_METHOD_HANDLE virtualMethod,
+--
+2.7.4
+
diff --git a/packaging/0029-Allocate-FileMappingImmutableData-szFileName-and-CFi.patch b/packaging/0029-Allocate-FileMappingImmutableData-szFileName-and-CFi.patch
new file mode 100644
index 0000000000..0d51058234
--- /dev/null
+++ b/packaging/0029-Allocate-FileMappingImmutableData-szFileName-and-CFi.patch
@@ -0,0 +1,1606 @@
+From ed41e9b578a7e888031896562ac0ce5657dc58f7 Mon Sep 17 00:00:00 2001
+From: gbalykov <g.balykov@samsung.com>
+Date: Tue, 15 Aug 2017 19:15:47 +0300
+Subject: [PATCH 29/32] Allocate FileMappingImmutableData::szFileName and
+ CFileProcessLocalData::unix_filename strings dynamically (#13374)
+
+---
+ src/pal/inc/pal.h | 13 --
+ src/pal/src/cruntime/filecrt.cpp | 2 +-
+ src/pal/src/file/file.cpp | 45 ++++-
+ src/pal/src/include/pal/corunix.hpp | 60 +++++-
+ src/pal/src/include/pal/event.hpp | 9 -
+ src/pal/src/include/pal/file.hpp | 2 +-
+ src/pal/src/include/pal/map.hpp | 11 +-
+ src/pal/src/include/pal/semaphore.hpp | 9 -
+ src/pal/src/map/map.cpp | 122 ++++++------
+ src/pal/src/objmgr/palobjbase.cpp | 10 +
+ src/pal/src/objmgr/shmobject.cpp | 294 ++--------------------------
+ src/pal/src/objmgr/shmobject.hpp | 21 +-
+ src/pal/src/objmgr/shmobjectmanager.cpp | 337 +-------------------------------
+ src/pal/src/objmgr/shmobjectmanager.hpp | 7 -
+ src/pal/src/synchobj/event.cpp | 88 +--------
+ src/pal/src/synchobj/mutex.cpp | 6 +
+ src/pal/src/synchobj/semaphore.cpp | 85 +-------
+ src/pal/src/thread/process.cpp | 11 +-
+ src/pal/src/thread/thread.cpp | 9 +-
+ 19 files changed, 231 insertions(+), 910 deletions(-)
+
+diff --git a/src/pal/inc/pal.h b/src/pal/inc/pal.h
+index 0f470d9..0a00b67 100644
+--- a/src/pal/inc/pal.h
++++ b/src/pal/inc/pal.h
+@@ -1601,19 +1601,6 @@ WaitForMultipleObjectsEx(
+ IN DWORD dwMilliseconds,
+ IN BOOL bAlertable);
+
+-PALIMPORT
+-RHANDLE
+-PALAPI
+-PAL_LocalHandleToRemote(
+- IN HANDLE hLocal);
+-
+-PALIMPORT
+-HANDLE
+-PALAPI
+-PAL_RemoteHandleToLocal(
+- IN RHANDLE hRemote);
+-
+-
+ #define DUPLICATE_CLOSE_SOURCE 0x00000001
+ #define DUPLICATE_SAME_ACCESS 0x00000002
+
+diff --git a/src/pal/src/cruntime/filecrt.cpp b/src/pal/src/cruntime/filecrt.cpp
+index 48079b3..182a42d 100644
+--- a/src/pal/src/cruntime/filecrt.cpp
++++ b/src/pal/src/cruntime/filecrt.cpp
+@@ -93,7 +93,7 @@ _open_osfhandle( INT_PTR osfhandle, int flags )
+
+ if (NO_ERROR == palError)
+ {
+- if ('\0' != pLocalData->unix_filename[0])
++ if (NULL != pLocalData->unix_filename)
+ {
+ nRetVal = InternalOpen(pLocalData->unix_filename, openFlags);
+ }
+diff --git a/src/pal/src/file/file.cpp b/src/pal/src/file/file.cpp
+index feec655..e7003c0 100644
+--- a/src/pal/src/file/file.cpp
++++ b/src/pal/src/file/file.cpp
+@@ -56,6 +56,12 @@ InternalSetFilePointerForUnixFd(
+ );
+
+ void
++CFileProcessLocalDataCleanupRoutine(
++ CPalThread *pThread,
++ IPalObject *pObjectToCleanup
++ );
++
++void
+ FileCleanupRoutine(
+ CPalThread *pThread,
+ IPalObject *pObjectToCleanup,
+@@ -68,7 +74,10 @@ CObjectType CorUnix::otFile(
+ FileCleanupRoutine,
+ NULL, // No initialization routine
+ 0, // No immutable data
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ sizeof(CFileProcessLocalData),
++ CFileProcessLocalDataCleanupRoutine,
+ 0, // No shared data
+ GENERIC_READ|GENERIC_WRITE, // Ignored -- no Win32 object security support
+ CObjectType::SecuritySupported,
+@@ -86,6 +95,34 @@ static CSharedMemoryFileLockMgr _FileLockManager;
+ IFileLockManager *CorUnix::g_pFileLockManager = &_FileLockManager;
+
+ void
++CFileProcessLocalDataCleanupRoutine(
++ CPalThread *pThread,
++ IPalObject *pObjectToCleanup
++ )
++{
++ PAL_ERROR palError;
++ CFileProcessLocalData *pLocalData = NULL;
++ IDataLock *pLocalDataLock = NULL;
++
++ palError = pObjectToCleanup->GetProcessLocalData(
++ pThread,
++ ReadLock,
++ &pLocalDataLock,
++ reinterpret_cast<void**>(&pLocalData)
++ );
++
++ if (NO_ERROR != palError)
++ {
++ ASSERT("Unable to obtain data to cleanup file object");
++ return;
++ }
++
++ free(pLocalData->unix_filename);
++
++ pLocalDataLock->ReleaseLock(pThread, FALSE);
++}
++
++void
+ FileCleanupRoutine(
+ CPalThread *pThread,
+ IPalObject *pObjectToCleanup,
+@@ -738,10 +775,12 @@ CorUnix::InternalCreateFile(
+ goto done;
+ }
+
+- if (strcpy_s(pLocalData->unix_filename, sizeof(pLocalData->unix_filename), lpUnixPath) != SAFECRT_SUCCESS)
++ _ASSERTE(pLocalData->unix_filename == NULL);
++ pLocalData->unix_filename = strdup(lpUnixPath);
++ if (pLocalData->unix_filename == NULL)
+ {
+- palError = ERROR_INSUFFICIENT_BUFFER;
+- TRACE("strcpy_s failed!\n");
++ ASSERT("Unable to copy string\n");
++ palError = ERROR_INTERNAL_ERROR;
+ goto done;
+ }
+
+diff --git a/src/pal/src/include/pal/corunix.hpp b/src/pal/src/include/pal/corunix.hpp
+index e9e9503..bfdfb6c 100644
+--- a/src/pal/src/include/pal/corunix.hpp
++++ b/src/pal/src/include/pal/corunix.hpp
+@@ -173,6 +173,15 @@ namespace CorUnix
+ void * // pProcessLocalData
+ );
+
++ typedef void (*OBJECT_IMMUTABLE_DATA_COPY_ROUTINE) (
++ void *,
++ void *);
++ typedef void (*OBJECT_IMMUTABLE_DATA_CLEANUP_ROUTINE) (
++ void *);
++ typedef void (*OBJECT_PROCESS_LOCAL_DATA_CLEANUP_ROUTINE) (
++ CPalThread *, // pThread
++ IPalObject *);
++
+ enum PalObjectTypeId
+ {
+ otiAutoResetEvent = 0,
+@@ -315,7 +324,10 @@ namespace CorUnix
+ OBJECTCLEANUPROUTINE m_pCleanupRoutine;
+ OBJECTINITROUTINE m_pInitRoutine;
+ DWORD m_dwImmutableDataSize;
++ OBJECT_IMMUTABLE_DATA_COPY_ROUTINE m_pImmutableDataCopyRoutine;
++ OBJECT_IMMUTABLE_DATA_CLEANUP_ROUTINE m_pImmutableDataCleanupRoutine;
+ DWORD m_dwProcessLocalDataSize;
++ OBJECT_PROCESS_LOCAL_DATA_CLEANUP_ROUTINE m_pProcessLocalDataCleanupRoutine;
+ DWORD m_dwSharedDataSize;
+ DWORD m_dwSupportedAccessRights;
+ // Generic access rights mapping
+@@ -335,7 +347,10 @@ namespace CorUnix
+ OBJECTCLEANUPROUTINE pCleanupRoutine,
+ OBJECTINITROUTINE pInitRoutine,
+ DWORD dwImmutableDataSize,
++ OBJECT_IMMUTABLE_DATA_COPY_ROUTINE pImmutableDataCopyRoutine,
++ OBJECT_IMMUTABLE_DATA_CLEANUP_ROUTINE pImmutableDataCleanupRoutine,
+ DWORD dwProcessLocalDataSize,
++ OBJECT_PROCESS_LOCAL_DATA_CLEANUP_ROUTINE pProcessLocalDataCleanupRoutine,
+ DWORD dwSharedDataSize,
+ DWORD dwSupportedAccessRights,
+ SecuritySupport eSecuritySupport,
+@@ -352,7 +367,10 @@ namespace CorUnix
+ m_pCleanupRoutine(pCleanupRoutine),
+ m_pInitRoutine(pInitRoutine),
+ m_dwImmutableDataSize(dwImmutableDataSize),
++ m_pImmutableDataCopyRoutine(pImmutableDataCopyRoutine),
++ m_pImmutableDataCleanupRoutine(pImmutableDataCleanupRoutine),
+ m_dwProcessLocalDataSize(dwProcessLocalDataSize),
++ m_pProcessLocalDataCleanupRoutine(pProcessLocalDataCleanupRoutine),
+ m_dwSharedDataSize(dwSharedDataSize),
+ m_dwSupportedAccessRights(dwSupportedAccessRights),
+ m_eSecuritySupport(eSecuritySupport),
+@@ -408,6 +426,38 @@ namespace CorUnix
+ return m_dwImmutableDataSize;
+ };
+
++ void
++ SetImmutableDataCopyRoutine(
++ OBJECT_IMMUTABLE_DATA_COPY_ROUTINE ptr
++ )
++ {
++ m_pImmutableDataCopyRoutine = ptr;
++ };
++
++ OBJECT_IMMUTABLE_DATA_COPY_ROUTINE
++ GetImmutableDataCopyRoutine(
++ void
++ )
++ {
++ return m_pImmutableDataCopyRoutine;
++ };
++
++ void
++ SetImmutableDataCleanupRoutine(
++ OBJECT_IMMUTABLE_DATA_CLEANUP_ROUTINE ptr
++ )
++ {
++ m_pImmutableDataCleanupRoutine = ptr;
++ };
++
++ OBJECT_IMMUTABLE_DATA_CLEANUP_ROUTINE
++ GetImmutableDataCleanupRoutine(
++ void
++ )
++ {
++ return m_pImmutableDataCleanupRoutine;
++ }
++
+ DWORD
+ GetProcessLocalDataSize(
+ void
+@@ -415,7 +465,15 @@ namespace CorUnix
+ {
+ return m_dwProcessLocalDataSize;
+ };
+-
++
++ OBJECT_PROCESS_LOCAL_DATA_CLEANUP_ROUTINE
++ GetProcessLocalDataCleanupRoutine(
++ void
++ )
++ {
++ return m_pProcessLocalDataCleanupRoutine;
++ }
++
+ DWORD
+ GetSharedDataSize(
+ void
+diff --git a/src/pal/src/include/pal/event.hpp b/src/pal/src/include/pal/event.hpp
+index 98eeaee..21dc478 100644
+--- a/src/pal/src/include/pal/event.hpp
++++ b/src/pal/src/include/pal/event.hpp
+@@ -44,15 +44,6 @@ namespace CorUnix
+ HANDLE hEvent,
+ BOOL fSetEvent
+ );
+-
+- PAL_ERROR
+- InternalOpenEvent(
+- CPalThread *pThread,
+- DWORD dwDesiredAccess,
+- BOOL bInheritHandle,
+- LPCWSTR lpName,
+- HANDLE *phEvent
+- );
+
+ }
+
+diff --git a/src/pal/src/include/pal/file.hpp b/src/pal/src/include/pal/file.hpp
+index 5acccb0..22e4187 100644
+--- a/src/pal/src/include/pal/file.hpp
++++ b/src/pal/src/include/pal/file.hpp
+@@ -44,7 +44,7 @@ namespace CorUnix
+ In Windows we can open a file for writing only */
+ int open_flags; /* stores Unix file creation flags */
+ BOOL open_flags_deviceaccessonly;
+- char unix_filename[MAXPATHLEN];
++ CHAR *unix_filename;
+ BOOL inheritable;
+ };
+
+diff --git a/src/pal/src/include/pal/map.hpp b/src/pal/src/include/pal/map.hpp
+index 854e6c5..7bcb20a 100644
+--- a/src/pal/src/include/pal/map.hpp
++++ b/src/pal/src/include/pal/map.hpp
+@@ -144,7 +144,7 @@ namespace CorUnix
+ class CFileMappingImmutableData
+ {
+ public:
+- CHAR szFileName[MAXPATHLEN];
++ CHAR *lpFileName;
+ UINT MaxSize; // The max size of the file mapping object
+ DWORD flProtect; // Protection desired for the file view
+ BOOL bPALCreatedTempFile; // TRUE if it's a PAL created file
+@@ -179,15 +179,6 @@ namespace CorUnix
+ );
+
+ PAL_ERROR
+- InternalOpenFileMapping(
+- CPalThread *pThread,
+- DWORD dwDesiredAccess,
+- BOOL bInheritHandle,
+- LPCWSTR lpName,
+- HANDLE *phMapping
+- );
+-
+- PAL_ERROR
+ InternalMapViewOfFile(
+ CPalThread *pThread,
+ HANDLE hFileMappingObject,
+diff --git a/src/pal/src/include/pal/semaphore.hpp b/src/pal/src/include/pal/semaphore.hpp
+index 2943d61..33cf35b 100644
+--- a/src/pal/src/include/pal/semaphore.hpp
++++ b/src/pal/src/include/pal/semaphore.hpp
+@@ -49,15 +49,6 @@ namespace CorUnix
+ LONG lReleaseCount,
+ LPLONG lpPreviousCount
+ );
+-
+- PAL_ERROR
+- InternalOpenSemaphore(
+- CPalThread *pThread,
+- DWORD dwDesiredAccess,
+- BOOL bInheritHandle,
+- LPCWSTR lpName,
+- HANDLE *phSemaphore
+- );
+
+ }
+
+diff --git a/src/pal/src/map/map.cpp b/src/pal/src/map/map.cpp
+index b8ffc84..f99e330 100644
+--- a/src/pal/src/map/map.cpp
++++ b/src/pal/src/map/map.cpp
+@@ -131,12 +131,26 @@ FileMappingInitializationRoutine(
+ void *pProcessLocalData
+ );
+
++void
++CFileMappingImmutableDataCopyRoutine(
++ void *pImmData,
++ void *pImmDataTarget
++ );
++
++void
++CFileMappingImmutableDataCleanupRoutine(
++ void *pImmData
++ );
++
+ CObjectType CorUnix::otFileMapping(
+ otiFileMapping,
+ FileMappingCleanupRoutine,
+ FileMappingInitializationRoutine,
+ sizeof(CFileMappingImmutableData),
++ CFileMappingImmutableDataCopyRoutine,
++ CFileMappingImmutableDataCleanupRoutine,
+ sizeof(CFileMappingProcessLocalData),
++ NULL, // No process local data cleanup routine
+ 0,
+ PAGE_READWRITE | PAGE_READONLY | PAGE_WRITECOPY,
+ CObjectType::SecuritySupported,
+@@ -152,6 +166,33 @@ CObjectType CorUnix::otFileMapping(
+ CAllowedObjectTypes aotFileMapping(otiFileMapping);
+
+ void
++CFileMappingImmutableDataCopyRoutine(
++ void *pImmData,
++ void *pImmDataTarget
++ )
++{
++ PAL_ERROR palError = NO_ERROR;
++ CFileMappingImmutableData *pImmutableData = (CFileMappingImmutableData *) pImmData;
++ CFileMappingImmutableData *pImmutableDataTarget = (CFileMappingImmutableData *) pImmDataTarget;
++
++ if (NULL != pImmutableData->lpFileName)
++ {
++ pImmutableDataTarget->lpFileName = strdup(pImmutableData->lpFileName);
++ }
++}
++
++void
++CFileMappingImmutableDataCleanupRoutine(
++ void *pImmData
++ )
++{
++ PAL_ERROR palError = NO_ERROR;
++ CFileMappingImmutableData *pImmutableData = (CFileMappingImmutableData *) pImmData;
++
++ free(pImmutableData->lpFileName);
++}
++
++void
+ FileMappingCleanupRoutine(
+ CPalThread *pThread,
+ IPalObject *pObjectToCleanup,
+@@ -184,7 +225,7 @@ FileMappingCleanupRoutine(
+
+ if (pImmutableData->bPALCreatedTempFile)
+ {
+- unlink(pImmutableData->szFileName);
++ unlink(pImmutableData->lpFileName);
+ }
+ }
+
+@@ -245,7 +286,7 @@ FileMappingInitializationRoutine(
+ reinterpret_cast<CFileMappingProcessLocalData *>(pvProcessLocalData);
+
+ pProcessLocalData->UnixFd = InternalOpen(
+- pImmutableData->szFileName,
++ pImmutableData->lpFileName,
+ MAPProtectionToFileOpenFlags(pImmutableData->flProtect) | O_CLOEXEC
+ );
+
+@@ -501,16 +542,18 @@ CorUnix::InternalCreateFileMapping(
+ //
+
+ /* Anonymous mapped files. */
+- if (strcpy_s(pImmutableData->szFileName, sizeof(pImmutableData->szFileName), "/dev/zero") != SAFECRT_SUCCESS)
++ _ASSERTE(pImmutableData->lpFileName == NULL);
++ pImmutableData->lpFileName = strdup("/dev/zero");
++ if (pImmutableData->lpFileName == NULL)
+ {
+- ERROR( "strcpy_s failed!\n" );
++ ASSERT("Unable to copy string\n");
+ palError = ERROR_INTERNAL_ERROR;
+ goto ExitInternalCreateFileMapping;
+ }
+
+ #if HAVE_MMAP_DEV_ZERO
+
+- UnixFd = InternalOpen(pImmutableData->szFileName, O_RDWR | O_CLOEXEC);
++ UnixFd = InternalOpen(pImmutableData->lpFileName, O_RDWR | O_CLOEXEC);
+ if ( -1 == UnixFd )
+ {
+ ERROR( "Unable to open the file.\n");
+@@ -598,10 +641,12 @@ CorUnix::InternalCreateFileMapping(
+ }
+ goto ExitInternalCreateFileMapping;
+ }
+-
+- if (strcpy_s(pImmutableData->szFileName, sizeof(pImmutableData->szFileName), pFileLocalData->unix_filename) != SAFECRT_SUCCESS)
++
++ _ASSERTE(pImmutableData->lpFileName == NULL);
++ pImmutableData->lpFileName = strdup(pFileLocalData->unix_filename);
++ if (pImmutableData->lpFileName == NULL)
+ {
+- ERROR( "strcpy_s failed!\n" );
++ ASSERT("Unable to copy string\n");
+ palError = ERROR_INTERNAL_ERROR;
+ if (NULL != pFileLocalDataLock)
+ {
+@@ -623,7 +668,7 @@ CorUnix::InternalCreateFileMapping(
+
+ /* Create a temporary file on the filesystem in order to be
+ shared across processes. */
+- palError = MAPCreateTempFile(pThread, &UnixFd, pImmutableData->szFileName);
++ palError = MAPCreateTempFile(pThread, &UnixFd, pImmutableData->lpFileName);
+ if (NO_ERROR != palError)
+ {
+ ERROR("Unable to create the temporary file.\n");
+@@ -771,7 +816,7 @@ ExitInternalCreateFileMapping:
+
+ if (bPALCreatedTempFile)
+ {
+- unlink(pImmutableData->szFileName);
++ unlink(pImmutableData->lpFileName);
+ }
+
+ if (-1 != UnixFd)
+@@ -881,63 +926,6 @@ OpenFileMappingW(
+ return hFileMapping;
+ }
+
+-PAL_ERROR
+-CorUnix::InternalOpenFileMapping(
+- CPalThread *pThread,
+- DWORD dwDesiredAccess,
+- BOOL bInheritHandle,
+- LPCWSTR lpName,
+- HANDLE *phMapping
+- )
+-{
+- PAL_ERROR palError = NO_ERROR;
+- IPalObject *pFileMapping = NULL;
+- CPalString sObjectName(lpName);
+-
+- if ( MAPContainsInvalidFlags( dwDesiredAccess ) )
+- {
+- ASSERT( "dwDesiredAccess can be one or more of FILE_MAP_READ, "
+- "FILE_MAP_WRITE, FILE_MAP_COPY or FILE_MAP_ALL_ACCESS.\n" );
+- palError = ERROR_INVALID_PARAMETER;
+- goto ExitInternalOpenFileMapping;
+- }
+-
+- palError = g_pObjectManager->LocateObject(
+- pThread,
+- &sObjectName,
+- &aotFileMapping,
+- &pFileMapping
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto ExitInternalOpenFileMapping;
+- }
+-
+- palError = g_pObjectManager->ObtainHandleForObject(
+- pThread,
+- pFileMapping,
+- dwDesiredAccess,
+- bInheritHandle,
+- NULL,
+- phMapping
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto ExitInternalOpenFileMapping;
+- }
+-
+-ExitInternalOpenFileMapping:
+-
+- if (NULL != pFileMapping)
+- {
+- pFileMapping->ReleaseReference(pThread);
+- }
+-
+- return palError;
+-}
+-
+ /*++
+ Function:
+ MapViewOfFile
+diff --git a/src/pal/src/objmgr/palobjbase.cpp b/src/pal/src/objmgr/palobjbase.cpp
+index 27842f6..0f226b9 100644
+--- a/src/pal/src/objmgr/palobjbase.cpp
++++ b/src/pal/src/objmgr/palobjbase.cpp
+@@ -314,6 +314,16 @@ CPalObjectBase::ReleaseReference(
+ );
+ }
+
++ if (NULL != m_pot->GetImmutableDataCleanupRoutine())
++ {
++ (*m_pot->GetImmutableDataCleanupRoutine())(m_pvImmutableData);
++ }
++
++ if (NULL != m_pot->GetProcessLocalDataCleanupRoutine())
++ {
++ (*m_pot->GetProcessLocalDataCleanupRoutine())(pthr, static_cast<IPalObject*>(this));
++ }
++
+ InternalDelete(this);
+
+ pthr->ReleaseThreadReference();
+diff --git a/src/pal/src/objmgr/shmobject.cpp b/src/pal/src/objmgr/shmobject.cpp
+index 2692554..17ef3e4 100644
+--- a/src/pal/src/objmgr/shmobject.cpp
++++ b/src/pal/src/objmgr/shmobject.cpp
+@@ -242,6 +242,13 @@ CSharedMemoryObject::InitializeFromExistingSharedData(
+ if (NULL != pv)
+ {
+ memcpy(m_pvImmutableData, pv, m_pot->GetImmutableDataSize());
++ if (NULL != psmod->pCopyRoutine)
++ {
++ (*psmod->pCopyRoutine)(pv, m_pvImmutableData);
++ }
++
++ m_pot->SetImmutableDataCopyRoutine(psmod->pCopyRoutine);
++ m_pot->SetImmutableDataCleanupRoutine(psmod->pCleanupRoutine);
+ }
+ else
+ {
+@@ -436,6 +443,10 @@ CSharedMemoryObject::FreeSharedDataAreas(
+
+ if (NULL != psmod->shmObjImmutableData)
+ {
++ if (NULL != psmod->pCleanupRoutine)
++ {
++ (*psmod->pCleanupRoutine)(psmod->shmObjImmutableData);
++ }
+ free(psmod->shmObjImmutableData);
+ }
+
+@@ -458,159 +469,6 @@ CSharedMemoryObject::FreeSharedDataAreas(
+
+ /*++
+ Function:
+- CSharedMemoryObject::PromoteShjaredData
+-
+- Copies the object's state into the passed-in shared data structures
+-
+-Parameters:
+- shmObjData -- shared memory pointer for the shared memory object data
+- psmod -- locally-mapped pointer for the shared memory object data
+---*/
+-
+-void
+-CSharedMemoryObject::PromoteSharedData(
+- SHMPTR shmObjData,
+- SHMObjData *psmod
+- )
+-{
+- _ASSERTE(NULL != shmObjData);
+- _ASSERTE(NULL != psmod);
+-
+- ENTRY("CSharedMemoryObject::PromoteSharedData"
+- "(this = %p, shmObjData = %p, psmod = %p)\n",
+- this,
+- shmObjData,
+- psmod);
+-
+- //
+- // psmod has been zero-inited, so we don't need to worry about
+- // shmPrevObj, shmNextObj, fAddedToList, shmObjName, dwNameLength,
+- // or pvSynchData
+- //
+-
+- psmod->lProcessRefCount = 1;
+- psmod->eTypeId = m_pot->GetId();
+-
+- if (0 != m_pot->GetImmutableDataSize())
+- {
+- void *pvImmutableData;
+-
+- pvImmutableData = SHMPTR_TO_TYPED_PTR(void, psmod->shmObjImmutableData);
+- _ASSERTE(NULL != pvImmutableData);
+-
+- CopyMemory(
+- pvImmutableData,
+- m_pvImmutableData,
+- m_pot->GetImmutableDataSize()
+- );
+- }
+-
+- if (0 != m_pot->GetSharedDataSize())
+- {
+- void *pvSharedData;
+-
+- pvSharedData = SHMPTR_TO_TYPED_PTR(void, psmod->shmObjSharedData);
+- _ASSERTE(NULL != pvSharedData);
+-
+- CopyMemory(
+- pvSharedData,
+- m_pvSharedData,
+- m_pot->GetSharedDataSize()
+- );
+-
+- free(m_pvSharedData);
+- m_pvSharedData = pvSharedData;
+- }
+-
+- m_shmod = shmObjData;
+-
+- LOGEXIT("CSharedMemoryObject::PromoteSharedData\n");
+-}
+-
+-/*++
+-Function:
+- CSharedMemoryObject::EnsureObjectIsShared
+-
+- If this object is not yet in the shared domain allocate the necessary
+- shared memory structures for it and copy the object's data into those
+- structures
+-
+-Parameters:
+- pthr -- thread data for the calling thread
+---*/
+-
+-PAL_ERROR
+-CSharedMemoryObject::EnsureObjectIsShared(
+- CPalThread *pthr
+- )
+-{
+- PAL_ERROR palError = NO_ERROR;
+- IDataLock *pDataLock = NULL;
+- SHMPTR shmObjData;
+- SHMObjData *psmod;
+-
+- _ASSERTE(NULL != pthr);
+-
+- ENTRY("CSharedMemoryObject::EnsureObjectIsShared"
+- "(this = %p, pthr = %p)\n",
+- this,
+- pthr
+- );
+-
+- //
+- // Grab the shared memory lock and check if the object is already
+- // shared
+- //
+-
+- SHMLock();
+-
+- if (SharedObject == m_ObjectDomain)
+- {
+- goto EnsureObjectIsSharedExit;
+- }
+-
+- //
+- // Grab the local shared data lock, if necessary
+- //
+-
+- if (0 != m_pot->GetSharedDataSize())
+- {
+- m_sdlSharedData.AcquireLock(pthr, &pDataLock);
+- }
+-
+- //
+- // Allocate the necessary shared data areas
+- //
+-
+- palError = AllocateSharedDataItems(&shmObjData, &psmod);
+- if (NO_ERROR != palError)
+- {
+- goto EnsureObjectIsSharedExit;
+- }
+-
+- //
+- // Promote the object's data and set the domain to shared
+- //
+-
+- PromoteSharedData(shmObjData, psmod);
+- m_ObjectDomain = SharedObject;
+-
+-EnsureObjectIsSharedExit:
+-
+- if (NULL != pDataLock)
+- {
+- pDataLock->ReleaseLock(pthr, TRUE);
+- }
+-
+- SHMRelease();
+-
+- LOGEXIT("CSharedMemoryObject::EnsureObjectIsShared returns %d\n", palError);
+-
+- return palError;
+-}
+-
+-/*++
+-Function:
+ CSharedMemoryObject::CleanupForProcessShutdown
+
+ Cleanup routine called by the object manager when shutting down
+@@ -646,6 +504,16 @@ CSharedMemoryObject::CleanupForProcessShutdown(
+ );
+ }
+
++ if (NULL != m_pot->GetImmutableDataCleanupRoutine())
++ {
++ (*m_pot->GetImmutableDataCleanupRoutine())(m_pvImmutableData);
++ }
++
++ if (NULL != m_pot->GetProcessLocalDataCleanupRoutine())
++ {
++ (*m_pot->GetProcessLocalDataCleanupRoutine())(pthr, static_cast<IPalObject*>(this));
++ }
++
+ //
+ // We need to do two things with the calling thread data here:
+ // 1) store it in m_pthrCleanup so it is available to the destructors
+@@ -1188,126 +1056,6 @@ InitializeExit:
+
+ /*++
+ Function:
+- CSharedMemoryWaitableObject::EnsureObjectIsShared
+-
+- If this object is not yet in the shared domain allocate the necessary
+- shared memory structures for it and copy the object's data into those
+- structures
+-
+-Parameters:
+- pthr -- thread data for the calling thread
+---*/
+-
+-PAL_ERROR
+-CSharedMemoryWaitableObject::EnsureObjectIsShared(
+- CPalThread *pthr
+- )
+-{
+- PAL_ERROR palError = NO_ERROR;
+- IDataLock *pDataLock = NULL;
+- SHMPTR shmObjData = NULL;
+- SHMObjData *psmod;
+- VOID *pvSharedSynchData;
+-
+- _ASSERTE(NULL != pthr);
+-
+- ENTRY("CSharedMemoryWaitableObject::EnsureObjectIsShared"
+- "(this = %p, pthr = %p)\n",
+- this,
+- pthr
+- );
+-
+- //
+- // First, grab the process synchronization lock and check
+- // if the object is already shared
+- //
+-
+- g_pSynchronizationManager->AcquireProcessLock(pthr);
+-
+- if (SharedObject == m_ObjectDomain)
+- {
+- goto EnsureObjectIsSharedExitNoSHMLockRelease;
+- }
+-
+- //
+- // Grab the necessary locks
+- //
+-
+- SHMLock();
+-
+- if (0 != m_pot->GetSharedDataSize())
+- {
+- m_sdlSharedData.AcquireLock(pthr, &pDataLock);
+- }
+-
+- //
+- // Allocate the necessary shared data areas
+- //
+-
+- palError = AllocateSharedDataItems(&shmObjData, &psmod);
+- if (NO_ERROR != palError)
+- {
+- goto EnsureObjectIsSharedExit;
+- }
+-
+- //
+- // Promote the object's synchronization data
+- //
+-
+- palError = g_pSynchronizationManager->PromoteObjectSynchData(
+- pthr,
+- m_pvSynchData,
+- &pvSharedSynchData
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto EnsureObjectIsSharedExit;
+- }
+-
+- m_pvSynchData = pvSharedSynchData;
+- psmod->pvSynchData = pvSharedSynchData;
+-
+- //
+- // Promote the object's data and set the domain to shared
+- //
+-
+- PromoteSharedData(shmObjData, psmod);
+- m_ObjectDomain = SharedObject;
+-
+-EnsureObjectIsSharedExit:
+-
+- if (NULL != pDataLock)
+- {
+- pDataLock->ReleaseLock(pthr, TRUE);
+- }
+-
+- SHMRelease();
+-
+-EnsureObjectIsSharedExitNoSHMLockRelease:
+-
+- g_pSynchronizationManager->ReleaseProcessLock(pthr);
+-
+- if (NO_ERROR != palError && NULL != shmObjData)
+- {
+- //
+- // Since shmObjdData is local to this function there's no
+- // need to continue to hold the promotion locks when
+- // freeing the allocated data on error
+- //
+-
+- FreeSharedDataAreas(shmObjData);
+- }
+-
+- LOGEXIT("CSharedMemoryWaitableObject::EnsureObjectIsShared returns %d\n",
+- palError
+- );
+-
+- return palError;
+-}
+-
+-/*++
+-Function:
+ CSharedMemoryWaitableObject::~CSharedMemoryWaitableObject
+
+ Destructor; should only be called from ReleaseReference
+diff --git a/src/pal/src/objmgr/shmobject.hpp b/src/pal/src/objmgr/shmobject.hpp
+index 66b9ea9..9d55f90 100644
+--- a/src/pal/src/objmgr/shmobject.hpp
++++ b/src/pal/src/objmgr/shmobject.hpp
+@@ -65,6 +65,9 @@ namespace CorUnix
+ SHMPTR shmObjImmutableData;
+ SHMPTR shmObjSharedData;
+
++ OBJECT_IMMUTABLE_DATA_COPY_ROUTINE pCopyRoutine;
++ OBJECT_IMMUTABLE_DATA_CLEANUP_ROUTINE pCleanupRoutine;
++
+ LONG lProcessRefCount;
+ DWORD dwNameLength;
+
+@@ -140,12 +143,6 @@ namespace CorUnix
+ SHMPTR shmObjData
+ );
+
+- void
+- PromoteSharedData(
+- SHMPTR shmObjData,
+- SHMObjData *psmod
+- );
+-
+ bool
+ DereferenceSharedData();
+
+@@ -228,12 +225,6 @@ namespace CorUnix
+ CObjectAttributes *poa
+ );
+
+- virtual
+- PAL_ERROR
+- EnsureObjectIsShared(
+- CPalThread *pthr
+- );
+-
+ void
+ CleanupForProcessShutdown(
+ CPalThread *pthr
+@@ -353,12 +344,6 @@ namespace CorUnix
+ CObjectAttributes *poa
+ );
+
+- virtual
+- PAL_ERROR
+- EnsureObjectIsShared(
+- CPalThread *pthr
+- );
+-
+ //
+ // IPalObject routines
+ //
+diff --git a/src/pal/src/objmgr/shmobjectmanager.cpp b/src/pal/src/objmgr/shmobjectmanager.cpp
+index 755fa46..90caa65 100644
+--- a/src/pal/src/objmgr/shmobjectmanager.cpp
++++ b/src/pal/src/objmgr/shmobjectmanager.cpp
+@@ -418,6 +418,14 @@ CSharedMemoryObjectManager::RegisterObject(
+ pvImmutableData,
+ potObj->GetImmutableDataSize()
+ );
++
++ if (NULL != potObj->GetImmutableDataCopyRoutine())
++ {
++ (*potObj->GetImmutableDataCopyRoutine())(pvImmutableData, pvSharedImmutableData);
++ }
++
++ psmod->pCopyRoutine = potObj->GetImmutableDataCopyRoutine();
++ psmod->pCleanupRoutine = potObj->GetImmutableDataCleanupRoutine();
+ }
+ else
+ {
+@@ -1174,335 +1182,6 @@ static CAllowedObjectTypes aotRemotable(
+
+ /*++
+ Function:
+- PAL_LocalHandleToRemote
+-
+- Returns a "remote handle" that may be passed to another process.
+-
+-Parameters:
+- hLocal -- the handle to generate a "remote handle" for
+---*/
+-
+-PALIMPORT
+-RHANDLE
+-PALAPI
+-PAL_LocalHandleToRemote(IN HANDLE hLocal)
+-{
+- PAL_ERROR palError = NO_ERROR;
+- CPalThread *pthr;
+- IPalObject *pobj = NULL;
+- CSharedMemoryObject *pshmobj;
+- SHMObjData *psmod = NULL;
+- RHANDLE hRemote = reinterpret_cast<RHANDLE>(INVALID_HANDLE_VALUE);
+-
+- PERF_ENTRY(PAL_LocalHandleToRemote);
+- ENTRY("PAL_LocalHandleToRemote( hLocal=0x%lx )\n", hLocal);
+-
+- pthr = InternalGetCurrentThread();
+-
+- if (!HandleIsSpecial(hLocal))
+- {
+- palError = g_pObjectManager->ReferenceObjectByHandle(
+- pthr,
+- hLocal,
+- &aotRemotable,
+- 0,
+- &pobj
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto PAL_LocalHandleToRemoteExitNoLockRelease;
+- }
+- }
+- else if (hPseudoCurrentProcess == hLocal)
+- {
+- pobj = g_pobjProcess;
+- pobj->AddReference();
+- }
+- else
+- {
+- ASSERT("Invalid special handle type passed to PAL_LocalHandleToRemote\n");
+- palError = ERROR_INVALID_HANDLE;
+- goto PAL_LocalHandleToRemoteExitNoLockRelease;
+- }
+-
+- pshmobj = static_cast<CSharedMemoryObject*>(pobj);
+-
+- //
+- // Make sure that the object is shared
+- //
+-
+- palError = pshmobj->EnsureObjectIsShared(pthr);
+- if (NO_ERROR != palError)
+- {
+- ERROR("Failure %d promoting object\n", palError);
+- goto PAL_LocalHandleToRemoteExitNoLockRelease;
+- }
+-
+- SHMLock();
+-
+- psmod = SHMPTR_TO_TYPED_PTR(SHMObjData, pshmobj->GetShmObjData());
+- if (NULL != psmod)
+- {
+- //
+- // Bump up the process ref count by 1. The receiving process will not
+- // increase the ref count when it converts the remote handle to
+- // local.
+- //
+-
+- psmod->lProcessRefCount += 1;
+-
+- //
+- // The remote handle is simply the SHMPTR for the SHMObjData
+- //
+-
+- hRemote = reinterpret_cast<RHANDLE>(pshmobj->GetShmObjData());
+- }
+- else
+- {
+- ASSERT("Unable to map shared object data\n");
+- palError = ERROR_INTERNAL_ERROR;
+- goto PAL_LocalHandleToRemoteExit;
+- }
+-
+-PAL_LocalHandleToRemoteExit:
+-
+- SHMRelease();
+-
+-PAL_LocalHandleToRemoteExitNoLockRelease:
+-
+- if (NULL != pobj)
+- {
+- pobj->ReleaseReference(pthr);
+- }
+-
+- if (NO_ERROR != palError)
+- {
+- pthr->SetLastError(palError);
+- }
+-
+- LOGEXIT("PAL_LocalHandleToRemote returns RHANDLE 0x%lx\n", hRemote);
+- PERF_EXIT(PAL_LocalHandleToRemote);
+- return hRemote;
+-}
+-
+-/*++
+-Function:
+- CSharedMemoryObjectManager::ConvertRemoteHandleToLocal
+-
+- Given a "remote handle" creates a local handle that refers
+- to the desired object. (Unlike PAL_RemoteHandleToLocal this method
+- needs to access internal object manager state, so it's a member function.)
+-
+-Parameters:
+- pthr -- thread data for calling thread
+- rhRemote -- the remote handle
+- phLocal -- on success, receives the local handle
+---*/
+-
+-PAL_ERROR
+-CSharedMemoryObjectManager::ConvertRemoteHandleToLocal(
+- CPalThread *pthr,
+- RHANDLE rhRemote,
+- HANDLE *phLocal
+- )
+-{
+- PAL_ERROR palError = NO_ERROR;
+- SHMObjData *psmod;
+- CSharedMemoryObject *pshmobj = NULL;
+- PLIST_ENTRY pleObjectList;
+-
+- _ASSERTE(NULL != pthr);
+- _ASSERTE(NULL != phLocal);
+-
+- ENTRY("CSharedMemoryObjectManager::ConvertRemoteHandleToLocal "
+- "(this=%p, pthr=%p, rhRemote=%p, phLocal=%p)\n",
+- this,
+- pthr,
+- rhRemote,
+- phLocal
+- );
+-
+- if (rhRemote == NULL || rhRemote == INVALID_HANDLE_VALUE)
+- {
+- palError = ERROR_INVALID_HANDLE;
+- goto ConvertRemoteHandleToLocalExitNoLockRelease;
+- }
+-
+- InternalEnterCriticalSection(pthr, &m_csListLock);
+- SHMLock();
+-
+- //
+- // The remote handle is really a shared memory pointer to the
+- // SHMObjData for the object.
+- //
+-
+- psmod = SHMPTR_TO_TYPED_PTR(SHMObjData, reinterpret_cast<SHMPTR>(rhRemote));
+- if (NULL == psmod)
+- {
+- ERROR("Invalid remote handle\n");
+- palError = ERROR_INVALID_HANDLE;
+- goto ConvertRemoteHandleToLocalExit;
+- }
+-
+- //
+- // Check to see if a local reference for this object already
+- // exists
+- //
+-
+- if (0 != psmod->dwNameLength)
+- {
+- pleObjectList = &m_leNamedObjects;
+- }
+- else
+- {
+- pleObjectList = &m_leAnonymousObjects;
+- }
+-
+- for (PLIST_ENTRY ple = pleObjectList->Flink;
+- ple != pleObjectList;
+- ple = ple->Flink)
+- {
+- pshmobj = CSharedMemoryObject::GetObjectFromListLink(ple);
+-
+- if (SharedObject == pshmobj->GetObjectDomain()
+- && reinterpret_cast<SHMPTR>(rhRemote) == pshmobj->GetShmObjData())
+- {
+- TRACE("Object for remote handle already present in this process\n");
+-
+- //
+- // PAL_LocalHandleToRemote bumped up the process refcount on the
+- // object. Since this process already had a reference to the object
+- // we need to decrement that reference now...
+- //
+-
+- psmod->lProcessRefCount -= 1;
+- _ASSERTE(0 < psmod->lProcessRefCount);
+-
+- //
+- // We also need to add a reference to the object (since ReleaseReference
+- // gets called below)
+- //
+-
+- pshmobj->AddReference();
+-
+- break;
+- }
+-
+- pshmobj = NULL;
+- }
+-
+- if (NULL == pshmobj)
+- {
+- CObjectType *pot;
+- CObjectAttributes oa;
+-
+- //
+- // Get the local instance of the CObjectType
+- //
+-
+- pot = CObjectType::GetObjectTypeById(psmod->eTypeId);
+- if (NULL == pot)
+- {
+- ASSERT("Invalid object type ID in shared memory info\n");
+- goto ConvertRemoteHandleToLocalExit;
+- }
+-
+- //
+- // Create the local state for the shared object
+- //
+-
+- palError = ImportSharedObjectIntoProcess(
+- pthr,
+- pot,
+- &oa,
+- reinterpret_cast<SHMPTR>(rhRemote),
+- psmod,
+- FALSE,
+- &pshmobj
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto ConvertRemoteHandleToLocalExit;
+- }
+- }
+-
+- //
+- // Finally, allocate a local handle for the object
+- //
+-
+- palError = ObtainHandleForObject(
+- pthr,
+- pshmobj,
+- 0,
+- FALSE,
+- NULL,
+- phLocal
+- );
+-
+-ConvertRemoteHandleToLocalExit:
+-
+- SHMRelease();
+- InternalLeaveCriticalSection(pthr, &m_csListLock);
+-
+-ConvertRemoteHandleToLocalExitNoLockRelease:
+-
+- if (NULL != pshmobj)
+- {
+- pshmobj->ReleaseReference(pthr);
+- }
+-
+- LOGEXIT("CSharedMemoryObjectManager::ConvertRemoteHandleToLocal returns %d\n", palError);
+-
+- return palError;
+-}
+-
+-/*++
+-Function:
+- PAL_RemoteHandleToLocal
+-
+- Given a "remote handle", return a local handle that refers to the
+- specified process. Calls
+- SharedMemoryObjectManager::ConvertRemoteHandleToLocal to do the actual
+- work
+-
+-Parameters:
+- rhRemote -- the "remote handle" to convert to a local handle
+---*/
+-
+-PALIMPORT
+-HANDLE
+-PALAPI
+-PAL_RemoteHandleToLocal(IN RHANDLE rhRemote)
+-{
+- PAL_ERROR palError = NO_ERROR;
+- CPalThread *pthr;
+- HANDLE hLocal = INVALID_HANDLE_VALUE;
+-
+- PERF_ENTRY(PAL_RemoteHandleToLocal);
+- ENTRY("PAL_RemoteHandleToLocal( hRemote=0x%lx )\n", rhRemote);
+-
+- pthr = InternalGetCurrentThread();
+-
+- palError = static_cast<CSharedMemoryObjectManager*>(g_pObjectManager)->ConvertRemoteHandleToLocal(
+- pthr,
+- rhRemote,
+- &hLocal
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- pthr->SetLastError(palError);
+- }
+-
+- LOGEXIT("PAL_RemoteHandleToLocal returns HANDLE 0x%lx\n", hLocal);
+- PERF_EXIT(PAL_RemoteHandleToLocal);
+- return hLocal;
+-}
+-
+-/*++
+-Function:
+ CheckObjectTypeAndRights
+
+ Helper routine that determines if:
+diff --git a/src/pal/src/objmgr/shmobjectmanager.hpp b/src/pal/src/objmgr/shmobjectmanager.hpp
+index fbde872..6e11b20 100644
+--- a/src/pal/src/objmgr/shmobjectmanager.hpp
++++ b/src/pal/src/objmgr/shmobjectmanager.hpp
+@@ -71,13 +71,6 @@ namespace CorUnix
+ CPalThread *pthr
+ );
+
+- PAL_ERROR
+- ConvertRemoteHandleToLocal(
+- CPalThread *pthr,
+- RHANDLE rhRemote,
+- HANDLE *phLocal
+- );
+-
+ //
+ // IPalObjectManager routines
+ //
+diff --git a/src/pal/src/synchobj/event.cpp b/src/pal/src/synchobj/event.cpp
+index 54addad..3d15917 100644
+--- a/src/pal/src/synchobj/event.cpp
++++ b/src/pal/src/synchobj/event.cpp
+@@ -35,7 +35,10 @@ CObjectType CorUnix::otManualResetEvent(
+ NULL, // No cleanup routine
+ NULL, // No initialization routine
+ 0, // No immutable data
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ 0, // No process local data
++ NULL, // No process local data cleanup routine
+ 0, // No shared data
+ EVENT_ALL_ACCESS, // Currently ignored (no Win32 security)
+ CObjectType::SecuritySupported,
+@@ -53,7 +56,10 @@ CObjectType CorUnix::otAutoResetEvent(
+ NULL, // No cleanup routine
+ NULL, // No initialization routine
+ 0, // No immutable data
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ 0, // No process local data
++ NULL, // No process local data cleanup routine
+ 0, // No shared data
+ EVENT_ALL_ACCESS, // Currently ignored (no Win32 security)
+ CObjectType::SecuritySupported,
+@@ -506,84 +512,4 @@ OpenEventWExit:
+ PERF_EXIT(OpenEventW);
+
+ return hEvent;
+-}
+-
+-/*++
+-Function:
+- InternalOpenEvent
+-
+-Note:
+- dwDesiredAccess is currently ignored (no Win32 object security support)
+- bInheritHandle is currently ignored (handles to events are not inheritable)
+-
+-Parameters:
+- pthr -- thread data for calling thread
+- phEvent -- on success, receives the allocated event handle
+-
+- See MSDN docs on OpenEvent for all other parameters.
+---*/
+-
+-PAL_ERROR
+-CorUnix::InternalOpenEvent(
+- CPalThread *pthr,
+- DWORD dwDesiredAccess,
+- BOOL bInheritHandle,
+- LPCWSTR lpName,
+- HANDLE *phEvent
+- )
+-{
+- PAL_ERROR palError = NO_ERROR;
+- IPalObject *pobjEvent = NULL;
+- CPalString sObjectName(lpName);
+-
+- _ASSERTE(NULL != pthr);
+- _ASSERTE(NULL != lpName);
+- _ASSERTE(NULL != phEvent);
+-
+- ENTRY("InternalOpenEvent(pthr=%p, dwDesiredAccess=%#x, bInheritHandle=%d, "
+- "lpName=%p, phEvent=%p)\n",
+- pthr,
+- dwDesiredAccess,
+- bInheritHandle,
+- lpName,
+- phEvent
+- );
+-
+- palError = g_pObjectManager->LocateObject(
+- pthr,
+- &sObjectName,
+- &aotEvent,
+- &pobjEvent
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto InternalOpenEventExit;
+- }
+-
+- palError = g_pObjectManager->ObtainHandleForObject(
+- pthr,
+- pobjEvent,
+- dwDesiredAccess,
+- bInheritHandle,
+- NULL,
+- phEvent
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto InternalOpenEventExit;
+- }
+-
+-InternalOpenEventExit:
+-
+- if (NULL != pobjEvent)
+- {
+- pobjEvent->ReleaseReference(pthr);
+- }
+-
+- LOGEXIT("InternalOpenEvent returns %d\n", palError);
+-
+- return palError;
+-}
+-
++}
+\ No newline at end of file
+diff --git a/src/pal/src/synchobj/mutex.cpp b/src/pal/src/synchobj/mutex.cpp
+index 692f5e2..fbbaf17 100644
+--- a/src/pal/src/synchobj/mutex.cpp
++++ b/src/pal/src/synchobj/mutex.cpp
+@@ -49,7 +49,10 @@ CObjectType CorUnix::otMutex(
+ NULL, // No cleanup routine
+ NULL, // No initialization routine
+ 0, // No immutable data
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ 0, // No process local data
++ NULL, // No process local data cleanup routine
+ 0, // No shared data
+ 0, // Should be MUTEX_ALL_ACCESS; currently ignored (no Win32 security)
+ CObjectType::SecuritySupported,
+@@ -69,7 +72,10 @@ CObjectType CorUnix::otNamedMutex(
+ &SharedMemoryProcessDataHeader::PalObject_Close, // Cleanup routine
+ NULL, // No initialization routine
+ sizeof(SharedMemoryProcessDataHeader *), // Immutable data
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ 0, // No process local data
++ NULL, // No process local data cleanup routine
+ 0, // No shared data
+ 0, // Should be MUTEX_ALL_ACCESS; currently ignored (no Win32 security)
+ CObjectType::SecuritySupported,
+diff --git a/src/pal/src/synchobj/semaphore.cpp b/src/pal/src/synchobj/semaphore.cpp
+index b224018..5f8cf72 100644
+--- a/src/pal/src/synchobj/semaphore.cpp
++++ b/src/pal/src/synchobj/semaphore.cpp
+@@ -35,7 +35,10 @@ CObjectType CorUnix::otSemaphore(
+ NULL, // No cleanup routine
+ NULL, // No initialization routine
+ sizeof(SemaphoreImmutableData),
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ 0, // No process local data
++ NULL, // No process local data cleanup routine
+ 0, // No shared data
+ 0, // Should be SEMAPHORE_ALL_ACCESS; currently ignored (no Win32 security)
+ CObjectType::SecuritySupported,
+@@ -597,84 +600,4 @@ OpenSemaphoreW(
+ PERF_EXIT(OpenSemaphoreW);
+
+ return hSemaphore;
+-}
+-
+-/*++
+-Function:
+- InternalOpenSemaphore
+-
+-Note:
+- dwDesiredAccess is currently ignored (no Win32 object security support)
+- bInheritHandle is currently ignored (handles to semaphores are not inheritable)
+-
+-Parameters:
+- pthr -- thread data for calling thread
+- phEvent -- on success, receives the allocated semaphore handle
+-
+- See MSDN docs on OpenSemaphore for all other parameters.
+---*/
+-
+-PAL_ERROR
+-CorUnix::InternalOpenSemaphore(
+- CPalThread *pthr,
+- DWORD dwDesiredAccess,
+- BOOL bInheritHandle,
+- LPCWSTR lpName,
+- HANDLE *phSemaphore
+- )
+-{
+- PAL_ERROR palError = NO_ERROR;
+- IPalObject *pobjSemaphore = NULL;
+- CPalString sObjectName(lpName);
+-
+- _ASSERTE(NULL != pthr);
+- _ASSERTE(NULL != lpName);
+- _ASSERTE(NULL != phSemaphore);
+-
+- ENTRY("InternalOpenSemaphore(pthr=%p, dwDesiredAccess=%d, bInheritHandle=%d, "
+- "lpName=%p, phSemaphore=%p)\n",
+- pthr,
+- dwDesiredAccess,
+- bInheritHandle,
+- phSemaphore
+- );
+-
+- palError = g_pObjectManager->LocateObject(
+- pthr,
+- &sObjectName,
+- &aotSempahore,
+- &pobjSemaphore
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto InternalOpenSemaphoreExit;
+- }
+-
+- palError = g_pObjectManager->ObtainHandleForObject(
+- pthr,
+- pobjSemaphore,
+- dwDesiredAccess,
+- bInheritHandle,
+- NULL,
+- phSemaphore
+- );
+-
+- if (NO_ERROR != palError)
+- {
+- goto InternalOpenSemaphoreExit;
+- }
+-
+-InternalOpenSemaphoreExit:
+-
+- if (NULL != pobjSemaphore)
+- {
+- pobjSemaphore->ReleaseReference(pthr);
+- }
+-
+- LOGEXIT("InternalOpenSemaphore returns %d\n", palError);
+-
+- return palError;
+-}
+-
+-
++}
+\ No newline at end of file
+diff --git a/src/pal/src/thread/process.cpp b/src/pal/src/thread/process.cpp
+index e22d268..e882cba 100644
+--- a/src/pal/src/thread/process.cpp
++++ b/src/pal/src/thread/process.cpp
+@@ -78,11 +78,14 @@ using namespace CorUnix;
+
+ CObjectType CorUnix::otProcess(
+ otiProcess,
+- NULL,
+- NULL,
+- 0,
++ NULL, // No cleanup routine
++ NULL, // No initialization routine
++ 0, // No immutable data
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ sizeof(CProcProcessLocalData),
+- 0,
++ NULL, // No process local data cleanup routine
++ 0, // No shared data
+ PROCESS_ALL_ACCESS,
+ CObjectType::SecuritySupported,
+ CObjectType::SecurityInfoNotPersisted,
+diff --git a/src/pal/src/thread/thread.cpp b/src/pal/src/thread/thread.cpp
+index df42ebc..15de659 100644
+--- a/src/pal/src/thread/thread.cpp
++++ b/src/pal/src/thread/thread.cpp
+@@ -130,10 +130,13 @@ CObjectType CorUnix::otThread(
+ otiThread,
+ ThreadCleanupRoutine,
+ ThreadInitializationRoutine,
+- 0, //sizeof(CThreadImmutableData),
++ 0, // sizeof(CThreadImmutableData),
++ NULL, // No immutable data copy routine
++ NULL, // No immutable data cleanup routine
+ sizeof(CThreadProcessLocalData),
+- 0, //sizeof(CThreadSharedData),
+- 0, // THREAD_ALL_ACCESS,
++ NULL, // No process local data cleanup routine
++ 0, // sizeof(CThreadSharedData),
++ 0, // THREAD_ALL_ACCESS,
+ CObjectType::SecuritySupported,
+ CObjectType::SecurityInfoNotPersisted,
+ CObjectType::UnnamedObject,
+--
+2.7.4
+
diff --git a/packaging/0030-Remove-relocations-for-MethodTable-m_pParentMethodTa.patch b/packaging/0030-Remove-relocations-for-MethodTable-m_pParentMethodTa.patch
new file mode 100644
index 0000000000..9dae3204ee
--- /dev/null
+++ b/packaging/0030-Remove-relocations-for-MethodTable-m_pParentMethodTa.patch
@@ -0,0 +1,563 @@
+From 52707bd377553ce6c42b1baa0b924dc7413e93ea Mon Sep 17 00:00:00 2001
+From: gbalykov <g.balykov@samsung.com>
+Date: Thu, 22 Feb 2018 20:47:46 +0300
+Subject: [PATCH 30/32] Remove relocations for
+ MethodTable::m_pParentMethodTable for Linux ARM (#15915)
+
+---
+ src/debug/daccess/nidump.cpp | 4 +-
+ src/inc/fixuppointer.h | 208 +++++++++++++++++++++++++++++++++++++++
+ src/vm/class.cpp | 10 +-
+ src/vm/generics.cpp | 1 +
+ src/vm/methodtable.cpp | 35 ++++---
+ src/vm/methodtable.h | 69 ++++++++++---
+ src/vm/proftoeeinterfaceimpl.cpp | 2 +-
+ 7 files changed, 299 insertions(+), 30 deletions(-)
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index 2732c9e..ef4725f 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -6053,7 +6053,7 @@ PTR_MethodTable NativeImageDumper::GetParent( PTR_MethodTable mt )
+ /* REVISIT_TODO Thu 12/01/2005
+ * Handle fixups
+ */
+- PTR_MethodTable parent( mt->m_pParentMethodTable );
++ PTR_MethodTable parent( ReadPointerMaybeNull((MethodTable*) mt, &MethodTable::m_pParentMethodTable, mt->GetFlagHasIndirectParent()) );
+ _ASSERTE(!CORCOMPILE_IS_POINTER_TAGGED(PTR_TO_TADDR(parent)));
+ return parent;
+ }
+@@ -7027,7 +7027,7 @@ NativeImageDumper::DumpMethodTable( PTR_MethodTable mt, const char * name,
+
+
+
+- PTR_MethodTable parent = mt->m_pParentMethodTable;
++ PTR_MethodTable parent = ReadPointerMaybeNull((MethodTable*) mt, &MethodTable::m_pParentMethodTable, mt->GetFlagHasIndirectParent());
+ if( parent == NULL )
+ {
+ DisplayWriteFieldPointer( m_pParentMethodTable, NULL, MethodTable,
+diff --git a/src/inc/fixuppointer.h b/src/inc/fixuppointer.h
+index a711418..5a897e4 100644
+--- a/src/inc/fixuppointer.h
++++ b/src/inc/fixuppointer.h
+@@ -319,6 +319,14 @@ public:
+ return FALSE;
+ }
+
++ // Returns whether the indirection cell contain fixup that has not been converted to real pointer yet.
++ // Ignores isIndirect and offset values.
++ FORCEINLINE BOOL IsTaggedIndirect(TADDR base, bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return IsTagged(base);
++ }
++
+ #ifndef DACCESS_COMPILE
+ // Returns whether the indirection cell contain fixup that has not been converted to real pointer yet.
+ // Does not need explicit base and thus can be used in non-DAC builds only.
+@@ -358,6 +366,14 @@ public:
+ return dac_cast<DPTR(RelativeFixupPointer<PTR_TYPE>)>(base)->GetValue(base);
+ }
+
++ // Static version of GetValue. It is meant to simplify access to arrays of pointers.
++ // Ignores isIndirect and offset values.
++ FORCEINLINE static PTR_TYPE GetValueAtPtrIndirect(TADDR base, bool isIndirect, intptr_t offset)
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return GetValueAtPtr(base);
++ }
++
+ // Returns value of the encoded pointer. The pointer can be NULL.
+ PTR_TYPE GetValueMaybeNull(TADDR base) const
+ {
+@@ -393,6 +409,14 @@ public:
+ return dac_cast<DPTR(RelativeFixupPointer<PTR_TYPE>)>(base)->GetValueMaybeNull(base);
+ }
+
++ // Static version of GetValueMaybeNull. It is meant to simplify access to arrays of pointers.
++ // Ignores isIndirect and offset values.
++ FORCEINLINE static PTR_TYPE GetValueMaybeNullAtPtrIndirect(TADDR base, bool isIndirect, intptr_t offset)
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return GetValueMaybeNullAtPtr(base);
++ }
++
+ #ifndef DACCESS_COMPILE
+ // Set encoded value of the pointer. Assumes that the value is not NULL.
+ FORCEINLINE void SetValue(PTR_TYPE addr)
+@@ -429,6 +453,14 @@ public:
+ _ASSERTE((addr & FIXUP_POINTER_INDIRECTION) != 0);
+ return (PTR_TYPE *)(addr - FIXUP_POINTER_INDIRECTION);
+ }
++
++ // Returns the pointer to the indirection cell.
++ // Ignores isIndirect and offset values.
++ PTR_TYPE * GetValuePtrIndirect(bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_CONTRACT;
++ return GetValuePtr();
++ }
+ #endif // !DACCESS_COMPILE
+
+ // Returns value of the encoded pointer. Assumes that the pointer is not NULL.
+@@ -462,6 +494,14 @@ public:
+ LIMITED_METHOD_CONTRACT;
+ return IsIndirectPtr((TADDR)this);
+ }
++
++ // Returns whether pointer is indirect. Assumes that the value is not NULL.
++ // Ignores isIndirect and offset values.
++ bool IsIndirectPtrIndirect(bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return IsIndirectPtr();
++ }
+ #endif
+
+ // Returns whether pointer is indirect. The value can be NULL.
+@@ -483,6 +523,14 @@ public:
+ LIMITED_METHOD_CONTRACT;
+ return IsIndirectPtrMaybeNull((TADDR)this);
+ }
++
++ // Returns whether pointer is indirect. The value can be NULL.
++ // Ignores isIndirect and offset values.
++ bool IsIndirectPtrMaybeNullIndirect(bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return IsIndirectPtrMaybeNull();
++ }
+ #endif
+
+ private:
+@@ -765,6 +813,130 @@ private:
+ INT32 m_delta;
+ };
+
++//----------------------------------------------------------------------------
++// IndirectPointer is pointer with optional indirection, similar to FixupPointer and RelativeFixupPointer.
++//
++// In comparison to FixupPointer, IndirectPointer's indirection is handled from outside by isIndirect flag.
++// In comparison to RelativeFixupPointer, IndirectPointer's offset is a constant,
++// while RelativeFixupPointer's offset is an address.
++//
++// IndirectPointer can contain NULL only if it is not indirect.
++//
++template<typename PTR_TYPE>
++class IndirectPointer
++{
++public:
++
++ static constexpr bool isRelative = false;
++ typedef PTR_TYPE type;
++
++ // Returns whether the encoded pointer is NULL.
++ BOOL IsNull() const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return m_addr == (TADDR)NULL;
++ }
++
++ // Returns whether the indirection cell contain fixup that has not been converted to real pointer yet.
++ // Uses isIndirect to identify, whether pointer is indirect or not. If it is, uses offset.
++ FORCEINLINE BOOL IsTaggedIndirect(TADDR base, bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ TADDR addr = m_addr;
++ if (isIndirect)
++ {
++ _ASSERTE(!IsNull());
++ return (*PTR_TADDR(addr + offset) & 1) != 0;
++ }
++ return FALSE;
++ }
++
++ // Returns value of the encoded pointer.
++ // Uses isIndirect to identify, whether pointer is indirect or not. If it is, uses offset.
++ FORCEINLINE PTR_TYPE GetValueIndirect(bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ TADDR addr = m_addr;
++ if (isIndirect)
++ {
++ _ASSERTE(!IsNull());
++ addr = *PTR_TADDR(addr + offset);
++ }
++ return dac_cast<PTR_TYPE>(addr);
++ }
++
++#ifndef DACCESS_COMPILE
++ // Returns the pointer to the indirection cell.
++ // Uses isIndirect to identify, whether pointer is indirect or not. If it is, uses offset.
++ PTR_TYPE * GetValuePtrIndirect(bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_CONTRACT;
++ TADDR addr = m_addr;
++ if (isIndirect)
++ {
++ _ASSERTE(!IsNull());
++ return (PTR_TYPE *)(addr + offset);
++ }
++ return (PTR_TYPE *)&m_addr;
++ }
++#endif // !DACCESS_COMPILE
++
++ // Static version of GetValue. It is meant to simplify access to arrays of pointers.
++ // Uses isIndirect to identify, whether pointer is indirect or not. If it is, uses offset.
++ FORCEINLINE static PTR_TYPE GetValueAtPtrIndirect(TADDR base, bool isIndirect, intptr_t offset)
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return dac_cast<DPTR(IndirectPointer<PTR_TYPE>)>(base)->GetValueIndirect(isIndirect, offset);
++ }
++
++ // Static version of GetValueMaybeNull. It is meant to simplify access to arrays of pointers.
++ // Uses isIndirect to identify, whether pointer is indirect or not. If it is, uses offset.
++ FORCEINLINE static PTR_TYPE GetValueMaybeNullAtPtrIndirect(TADDR base, bool isIndirect, intptr_t offset)
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++ return GetValueAtPtrIndirect(base, isIndirect, offset);
++ }
++
++#ifndef DACCESS_COMPILE
++ // Returns whether pointer is indirect. Assumes that the value is not NULL.
++ // Uses isIndirect to identify, whether pointer is indirect or not. If it is, uses offset.
++ bool IsIndirectPtrIndirect(bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_CONTRACT;
++ if (isIndirect)
++ _ASSERTE(!IsNull());
++ return isIndirect;
++ }
++
++ // Returns whether pointer is indirect. The value can be NULL.
++ // Uses isIndirect to identify, whether pointer is indirect or not. If it is, uses offset.
++ bool IsIndirectPtrMaybeNullIndirect(bool isIndirect, intptr_t offset) const
++ {
++ LIMITED_METHOD_CONTRACT;
++ return IsIndirectPtrIndirect(isIndirect, offset);
++ }
++#endif // !DACCESS_COMPILE
++
++#ifndef DACCESS_COMPILE
++ // Set encoded value of the pointer. Assumes that the value is not NULL.
++ void SetValue(PTR_TYPE addr)
++ {
++ LIMITED_METHOD_CONTRACT;
++ m_addr = dac_cast<TADDR>(addr);
++ }
++
++ // Set encoded value of the pointer. The value can be NULL.
++ void SetValueMaybeNull(PTR_TYPE addr)
++ {
++ LIMITED_METHOD_CONTRACT;
++ SetValue(addr);
++ }
++#endif // !DACCESS_COMPILE
++
++private:
++ TADDR m_addr;
++};
++
+ template<bool isMaybeNull, typename T, typename PT>
+ typename PT::type
+ ReadPointer(const T *base, const PT T::* pPointerFieldMember)
+@@ -783,6 +955,24 @@ ReadPointer(const T *base, const PT T::* pPointerFieldMember)
+ }
+ }
+
++template<bool isMaybeNull, typename T, typename PT>
++typename PT::type
++ReadPointer(const T *base, const PT T::* pPointerFieldMember, bool isIndirect)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ uintptr_t offset = (uintptr_t) &(base->*pPointerFieldMember) - (uintptr_t) base;
++
++ if (isMaybeNull)
++ {
++ return PT::GetValueMaybeNullAtPtrIndirect(dac_cast<TADDR>(base) + offset, isIndirect, offset);
++ }
++ else
++ {
++ return PT::GetValueAtPtrIndirect(dac_cast<TADDR>(base) + offset, isIndirect, offset);
++ }
++}
++
+ template<typename T, typename PT>
+ typename PT::type
+ ReadPointerMaybeNull(const T *base, const PT T::* pPointerFieldMember)
+@@ -794,6 +984,15 @@ ReadPointerMaybeNull(const T *base, const PT T::* pPointerFieldMember)
+
+ template<typename T, typename PT>
+ typename PT::type
++ReadPointerMaybeNull(const T *base, const PT T::* pPointerFieldMember, bool isIndirect)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointer<true>(base, pPointerFieldMember, isIndirect);
++}
++
++template<typename T, typename PT>
++typename PT::type
+ ReadPointer(const T *base, const PT T::* pPointerFieldMember)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+@@ -801,6 +1000,15 @@ ReadPointer(const T *base, const PT T::* pPointerFieldMember)
+ return ReadPointer<false>(base, pPointerFieldMember);
+ }
+
++template<typename T, typename PT>
++typename PT::type
++ReadPointer(const T *base, const PT T::* pPointerFieldMember, bool isIndirect)
++{
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointer<false>(base, pPointerFieldMember, isIndirect);
++}
++
+ template<bool isMaybeNull, typename T, typename C, typename PT>
+ typename PT::type
+ ReadPointer(const T *base, const C T::* pFirstPointerFieldMember, const PT C::* pSecondPointerFieldMember)
+diff --git a/src/vm/class.cpp b/src/vm/class.cpp
+index c1519a2..7b2885b 100644
+--- a/src/vm/class.cpp
++++ b/src/vm/class.cpp
+@@ -889,7 +889,15 @@ ClassLoader::LoadExactParentAndInterfacesTransitively(MethodTable *pMT)
+ LOG((LF_CLASSLOADER, LL_INFO1000, "GENERICS: Replaced approximate parent %s with exact parent %s from token %x\n", pParentMT->GetDebugClassName(), pNewParentMT->GetDebugClassName(), crExtends));
+
+ // SetParentMethodTable is not used here since we want to update the indirection cell in the NGen case
+- *EnsureWritablePages(pMT->GetParentMethodTablePtr()) = pNewParentMT;
++ if (pMT->IsParentMethodTableIndirectPointerMaybeNull())
++ {
++ *EnsureWritablePages(pMT->GetParentMethodTableValuePtr()) = pNewParentMT;
++ }
++ else
++ {
++ EnsureWritablePages(pMT->GetParentMethodTablePointerPtr());
++ pMT->GetParentMethodTablePointerPtr()->SetValueMaybeNull(pNewParentMT);
++ }
+
+ pParentMT = pNewParentMT;
+ }
+diff --git a/src/vm/generics.cpp b/src/vm/generics.cpp
+index b110184..61a1de5 100644
+--- a/src/vm/generics.cpp
++++ b/src/vm/generics.cpp
+@@ -365,6 +365,7 @@ ClassLoader::CreateTypeHandleForNonCanonicalGenericInstantiation(
+ pMT->ClearFlag(MethodTable::enum_flag_IsPreRestored);
+
+ pMT->ClearFlag(MethodTable::enum_flag_HasIndirectParent);
++ pMT->m_pParentMethodTable.SetValueMaybeNull(NULL);
+
+ // Non non-virtual slots
+ pMT->ClearFlag(MethodTable::enum_flag_HasSingleNonVirtualSlot);
+diff --git a/src/vm/methodtable.cpp b/src/vm/methodtable.cpp
+index 75db911..b097406 100644
+--- a/src/vm/methodtable.cpp
++++ b/src/vm/methodtable.cpp
+@@ -4778,7 +4778,17 @@ void MethodTable::Fixup(DataImage *image)
+ #endif // _DEBUG
+
+ MethodTable * pParentMT = GetParentMethodTable();
+- _ASSERTE(!pNewMT->GetFlag(enum_flag_HasIndirectParent));
++ _ASSERTE(!pNewMT->IsParentMethodTableIndirectPointerMaybeNull());
++
++ ZapRelocationType relocType;
++ if (decltype(MethodTable::m_pParentMethodTable)::isRelative)
++ {
++ relocType = IMAGE_REL_BASED_RELPTR;
++ }
++ else
++ {
++ relocType = IMAGE_REL_BASED_PTR;
++ }
+
+ if (pParentMT != NULL)
+ {
+@@ -4790,7 +4800,8 @@ void MethodTable::Fixup(DataImage *image)
+ {
+ if (image->CanHardBindToZapModule(pParentMT->GetLoaderModule()))
+ {
+- image->FixupPointerField(this, offsetof(MethodTable, m_pParentMethodTable));
++ _ASSERTE(!IsParentMethodTableIndirectPointer());
++ image->FixupField(this, offsetof(MethodTable, m_pParentMethodTable), pParentMT, 0, relocType);
+ }
+ else
+ {
+@@ -4826,7 +4837,7 @@ void MethodTable::Fixup(DataImage *image)
+
+ if (pImport != NULL)
+ {
+- image->FixupFieldToNode(this, offsetof(MethodTable, m_pParentMethodTable), pImport, -(SSIZE_T)offsetof(MethodTable, m_pParentMethodTable));
++ image->FixupFieldToNode(this, offsetof(MethodTable, m_pParentMethodTable), pImport, -PARENT_MT_FIXUP_OFFSET, relocType);
+ pNewMT->SetFlag(enum_flag_HasIndirectParent);
+ }
+ }
+@@ -6091,7 +6102,15 @@ void MethodTable::Restore()
+ //
+ // Restore parent method table
+ //
+- Module::RestoreMethodTablePointerRaw(GetParentMethodTablePtr(), GetLoaderModule(), CLASS_LOAD_APPROXPARENTS);
++ if (IsParentMethodTableIndirectPointerMaybeNull())
++ {
++ Module::RestoreMethodTablePointerRaw(GetParentMethodTableValuePtr(), GetLoaderModule(), CLASS_LOAD_APPROXPARENTS);
++ }
++ else
++ {
++ ClassLoader::EnsureLoaded(ReadPointer(this, &MethodTable::m_pParentMethodTable, GetFlagHasIndirectParent()),
++ CLASS_LOAD_APPROXPARENTS);
++ }
+
+ //
+ // Restore interface classes
+@@ -7849,13 +7868,7 @@ BOOL MethodTable::IsParentMethodTablePointerValid()
+ if (!GetWriteableData_NoLogging()->IsParentMethodTablePointerValid())
+ return FALSE;
+
+- if (!GetFlag(enum_flag_HasIndirectParent))
+- {
+- return TRUE;
+- }
+- TADDR pMT;
+- pMT = *PTR_TADDR(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable));
+- return !CORCOMPILE_IS_POINTER_TAGGED(pMT);
++ return !IsParentMethodTableTagged(dac_cast<PTR_MethodTable>(this));
+ }
+ #endif
+
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index 8c15d2e..c2c2564 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -2127,6 +2127,14 @@ public:
+ // THE METHOD TABLE PARENT (SUPERCLASS/BASE CLASS)
+ //
+
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++#define PARENT_MT_FIXUP_OFFSET (-FIXUP_POINTER_INDIRECTION)
++ typedef RelativeFixupPointer<PTR_MethodTable> ParentMT_t;
++#else
++#define PARENT_MT_FIXUP_OFFSET ((SSIZE_T)offsetof(MethodTable, m_pParentMethodTable))
++ typedef IndirectPointer<PTR_MethodTable> ParentMT_t;
++#endif
++
+ BOOL HasApproxParent()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+@@ -2145,33 +2153,63 @@ public:
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ PRECONDITION(IsParentMethodTablePointerValid());
+-
+- TADDR pMT = m_pParentMethodTable;
+-#ifdef FEATURE_PREJIT
+- if (GetFlag(enum_flag_HasIndirectParent))
+- pMT = *PTR_TADDR(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable));
+-#endif
+- return PTR_MethodTable(pMT);
++ return ReadPointerMaybeNull(this, &MethodTable::m_pParentMethodTable, GetFlagHasIndirectParent());
+ }
+
+ inline static PTR_VOID GetParentMethodTableOrIndirection(PTR_VOID pMT)
+ {
+ WRAPPER_NO_CONTRACT;
++#if defined(PLATFORM_UNIX) && defined(_TARGET_ARM_)
++ PTR_MethodTable pMethodTable = dac_cast<PTR_MethodTable>(pMT);
++ PTR_MethodTable pParentMT = ReadPointerMaybeNull((MethodTable*) pMethodTable, &MethodTable::m_pParentMethodTable);
++ return dac_cast<PTR_VOID>(pParentMT);
++#else
+ return PTR_VOID(*PTR_TADDR(dac_cast<TADDR>(pMT) + offsetof(MethodTable, m_pParentMethodTable)));
++#endif
+ }
+
+- inline MethodTable ** GetParentMethodTablePtr()
++ inline static bool IsParentMethodTableTagged(PTR_MethodTable pMT)
+ {
+- WRAPPER_NO_CONTRACT;
++ LIMITED_METHOD_CONTRACT;
++ TADDR base = dac_cast<TADDR>(pMT) + offsetof(MethodTable, m_pParentMethodTable);
++ return pMT->m_pParentMethodTable.IsTaggedIndirect(base, pMT->GetFlagHasIndirectParent(), PARENT_MT_FIXUP_OFFSET);
++ }
+
++ bool GetFlagHasIndirectParent()
++ {
+ #ifdef FEATURE_PREJIT
+- return GetFlag(enum_flag_HasIndirectParent) ?
+- (MethodTable **)(m_pParentMethodTable + offsetof(MethodTable, m_pParentMethodTable)) :(MethodTable **)&m_pParentMethodTable;
++ return GetFlag(enum_flag_HasIndirectParent);
+ #else
+- return (MethodTable **)&m_pParentMethodTable;
++ return FALSE;
+ #endif
+ }
+
++#ifndef DACCESS_COMPILE
++ inline ParentMT_t * GetParentMethodTablePointerPtr()
++ {
++ LIMITED_METHOD_CONTRACT;
++ return &m_pParentMethodTable;
++ }
++
++ inline bool IsParentMethodTableIndirectPointerMaybeNull()
++ {
++ LIMITED_METHOD_CONTRACT;
++ return m_pParentMethodTable.IsIndirectPtrMaybeNullIndirect(GetFlagHasIndirectParent(), PARENT_MT_FIXUP_OFFSET);
++ }
++
++ inline bool IsParentMethodTableIndirectPointer()
++ {
++ LIMITED_METHOD_CONTRACT;
++ return m_pParentMethodTable.IsIndirectPtrIndirect(GetFlagHasIndirectParent(), PARENT_MT_FIXUP_OFFSET);
++ }
++
++ inline MethodTable ** GetParentMethodTableValuePtr()
++ {
++ LIMITED_METHOD_CONTRACT;
++ return m_pParentMethodTable.GetValuePtrIndirect(GetFlagHasIndirectParent(), PARENT_MT_FIXUP_OFFSET);
++ }
++#endif // !DACCESS_COMPILE
++
+ // Is the parent method table pointer equal to the given argument?
+ BOOL ParentEquals(PTR_MethodTable pMT)
+ {
+@@ -2189,8 +2227,8 @@ public:
+ void SetParentMethodTable (MethodTable *pParentMethodTable)
+ {
+ LIMITED_METHOD_CONTRACT;
+- PRECONDITION(!GetFlag(enum_flag_HasIndirectParent));
+- m_pParentMethodTable = (TADDR)pParentMethodTable;
++ PRECONDITION(!IsParentMethodTableIndirectPointerMaybeNull());
++ m_pParentMethodTable.SetValueMaybeNull(pParentMethodTable);
+ #ifdef _DEBUG
+ GetWriteableDataForWrite_NoLogging()->SetParentMethodTablePointerValid();
+ #endif
+@@ -4095,11 +4133,12 @@ private:
+ LPCUTF8 debug_m_szClassName;
+ #endif //_DEBUG
+
++ // On Linux ARM is a RelativeFixupPointer. Otherwise,
+ // Parent PTR_MethodTable if enum_flag_HasIndirectParent is not set. Pointer to indirection cell
+ // if enum_flag_enum_flag_HasIndirectParent is set. The indirection is offset by offsetof(MethodTable, m_pParentMethodTable).
+ // It allows casting helpers to go through parent chain natually. Casting helper do not need need the explicit check
+ // for enum_flag_HasIndirectParentMethodTable.
+- TADDR m_pParentMethodTable;
++ ParentMT_t m_pParentMethodTable;
+
+ RelativePointer<PTR_Module> m_pLoaderModule; // LoaderModule. It is equal to the ZapModule in ngened images
+
+diff --git a/src/vm/proftoeeinterfaceimpl.cpp b/src/vm/proftoeeinterfaceimpl.cpp
+index cfd99ad..972a0eb 100644
+--- a/src/vm/proftoeeinterfaceimpl.cpp
++++ b/src/vm/proftoeeinterfaceimpl.cpp
+@@ -6832,7 +6832,7 @@ HRESULT ProfToEEInterfaceImpl::GetClassLayout(ClassID classID,
+ // running into - attempting to get the class layout for all types at module load time.
+ // If we don't detect this the runtime will AV during the field iteration below. Feel
+ // free to eliminate this check when a more complete solution is available.
+- if (CORCOMPILE_IS_POINTER_TAGGED(*(typeHandle.AsMethodTable()->GetParentMethodTablePtr())))
++ if (MethodTable::IsParentMethodTableTagged(typeHandle.AsMethodTable()))
+ {
+ return CORPROF_E_DATAINCOMPLETE;
+ }
+--
+2.7.4
+
diff --git a/packaging/0031-Fix-build-break-with-older-VS-versions-16522.patch b/packaging/0031-Fix-build-break-with-older-VS-versions-16522.patch
new file mode 100644
index 0000000000..62f238efe4
--- /dev/null
+++ b/packaging/0031-Fix-build-break-with-older-VS-versions-16522.patch
@@ -0,0 +1,37 @@
+From 23ab0b67eec1cc89a17cbdf745c433bab38b9506 Mon Sep 17 00:00:00 2001
+From: Jan Kotas <jkotas@microsoft.com>
+Date: Fri, 23 Feb 2018 00:17:45 -0800
+Subject: [PATCH 31/32] Fix build break with older VS versions (#16522)
+
+---
+ src/vm/methodtable.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/src/vm/methodtable.h b/src/vm/methodtable.h
+index c2c2564..9dc24d8 100644
+--- a/src/vm/methodtable.h
++++ b/src/vm/methodtable.h
+@@ -2168,7 +2168,7 @@ public:
+ #endif
+ }
+
+- inline static bool IsParentMethodTableTagged(PTR_MethodTable pMT)
++ inline static BOOL IsParentMethodTableTagged(PTR_MethodTable pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+ TADDR base = dac_cast<TADDR>(pMT) + offsetof(MethodTable, m_pParentMethodTable);
+@@ -2178,9 +2178,9 @@ public:
+ bool GetFlagHasIndirectParent()
+ {
+ #ifdef FEATURE_PREJIT
+- return GetFlag(enum_flag_HasIndirectParent);
++ return !!GetFlag(enum_flag_HasIndirectParent);
+ #else
+- return FALSE;
++ return false;
+ #endif
+ }
+
+--
+2.7.4
+
diff --git a/packaging/0032-Fix-handling-of-incorrect-assemblies-on-Unix-16747.patch b/packaging/0032-Fix-handling-of-incorrect-assemblies-on-Unix-16747.patch
new file mode 100644
index 0000000000..d18df3e22c
--- /dev/null
+++ b/packaging/0032-Fix-handling-of-incorrect-assemblies-on-Unix-16747.patch
@@ -0,0 +1,196 @@
+From d47f1d334d67db1b28b9d55e7b6eccf71403ab0e Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Tue, 6 Mar 2018 06:37:43 +0000
+Subject: [PATCH 32/32] Fix handling of incorrect assemblies on Unix (#16747)
+
+* Return DPTR from PEDecoder::FindFirstSection()
+
+Change type of the function's return value
+to PTR_IMAGE_SECTION_HEADER instead of (IMAGE_SECTION_HEADER *)
+
+* Fix handling of incorrect assemblies on Unix
+
+This fixes the regression that was introduced by #10772 and is
+caused by a missing check for validity of loaded assembly file.
+
+Related issue: #15544
+---
+ src/debug/daccess/nidump.cpp | 2 +-
+ src/inc/pedecoder.h | 2 +-
+ src/inc/pedecoder.inl | 2 +-
+ src/utilcode/pedecoder.cpp | 3 +-
+ src/vm/peimage.cpp | 7 ++--
+ tests/src/Loader/regressions/GitHub_15544/main.cs | 37 ++++++++++++++++++++++
+ .../Loader/regressions/GitHub_15544/main.csproj | 31 ++++++++++++++++++
+ 7 files changed, 77 insertions(+), 7 deletions(-)
+ create mode 100644 tests/src/Loader/regressions/GitHub_15544/main.cs
+ create mode 100644 tests/src/Loader/regressions/GitHub_15544/main.csproj
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index ef4725f..18bef97 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -720,7 +720,7 @@ NativeImageDumper::DumpNativeImage()
+
+ for (COUNT_T i = 0; i < m_decoder.GetNumberOfSections(); i++)
+ {
+- PTR_IMAGE_SECTION_HEADER section = dptr_add(m_decoder.FindFirstSection(), i);
++ PTR_IMAGE_SECTION_HEADER section = m_decoder.FindFirstSection() + i;
+ m_display->Section(reinterpret_cast<char *>(section->Name),
+ section->VirtualAddress,
+ section->SizeOfRawData);
+diff --git a/src/inc/pedecoder.h b/src/inc/pedecoder.h
+index 01375e6..8163fff 100644
+--- a/src/inc/pedecoder.h
++++ b/src/inc/pedecoder.h
+@@ -182,7 +182,7 @@ class PEDecoder
+ UINT32 GetWin32VersionValue() const;
+ COUNT_T GetNumberOfRvaAndSizes() const;
+ COUNT_T GetNumberOfSections() const;
+- IMAGE_SECTION_HEADER *FindFirstSection() const;
++ PTR_IMAGE_SECTION_HEADER FindFirstSection() const;
+ IMAGE_SECTION_HEADER *FindSection(LPCSTR sectionName) const;
+
+ DWORD GetImageIdentity() const;
+diff --git a/src/inc/pedecoder.inl b/src/inc/pedecoder.inl
+index b75c495..4199a5b 100644
+--- a/src/inc/pedecoder.inl
++++ b/src/inc/pedecoder.inl
+@@ -1178,7 +1178,7 @@ inline DWORD PEDecoder::GetImageIdentity() const
+ }
+
+
+-inline IMAGE_SECTION_HEADER *PEDecoder::FindFirstSection() const
++inline PTR_IMAGE_SECTION_HEADER PEDecoder::FindFirstSection() const
+ {
+ CONTRACT(IMAGE_SECTION_HEADER *)
+ {
+diff --git a/src/utilcode/pedecoder.cpp b/src/utilcode/pedecoder.cpp
+index babe374..e0f441c 100644
+--- a/src/utilcode/pedecoder.cpp
++++ b/src/utilcode/pedecoder.cpp
+@@ -445,6 +445,7 @@ BOOL PEDecoder::HasWriteableSections() const
+ CONTRACT_CHECK
+ {
+ INSTANCE_CHECK;
++ PRECONDITION(CheckNTHeaders());
+ PRECONDITION(CheckFormat());
+ NOTHROW;
+ GC_NOTRIGGER;
+@@ -453,7 +454,7 @@ BOOL PEDecoder::HasWriteableSections() const
+ }
+ CONTRACT_CHECK_END;
+
+- PTR_IMAGE_SECTION_HEADER pSection = FindFirstSection(FindNTHeaders());
++ PTR_IMAGE_SECTION_HEADER pSection = FindFirstSection();
+ _ASSERTE(pSection != NULL);
+
+ PTR_IMAGE_SECTION_HEADER pSectionEnd = pSection + VAL16(FindNTHeaders()->FileHeader.NumberOfSections);
+diff --git a/src/vm/peimage.cpp b/src/vm/peimage.cpp
+index bd5ad7f..95f32e3 100644
+--- a/src/vm/peimage.cpp
++++ b/src/vm/peimage.cpp
+@@ -1029,7 +1029,9 @@ PTR_PEImageLayout PEImage::CreateLayoutFlat(BOOL bPermitWriteableSections)
+
+ PTR_PEImageLayout pFlatLayout = PEImageLayout::LoadFlat(GetFileHandle(),this);
+
+- if (!bPermitWriteableSections && pFlatLayout->HasWriteableSections())
++ if (!bPermitWriteableSections
++ && pFlatLayout->CheckNTHeaders()
++ && pFlatLayout->HasWriteableSections())
+ {
+ pFlatLayout->Release();
+
+@@ -1108,8 +1110,7 @@ void PEImage::Load()
+
+ #ifdef PLATFORM_UNIX
+ if (m_pLayouts[IMAGE_FLAT] != NULL
+- && m_pLayouts[IMAGE_FLAT]->CheckFormat()
+- && m_pLayouts[IMAGE_FLAT]->IsILOnly()
++ && m_pLayouts[IMAGE_FLAT]->CheckILOnlyFormat()
+ && !m_pLayouts[IMAGE_FLAT]->HasWriteableSections())
+ {
+ // IL-only images with writeable sections are mapped in general way,
+diff --git a/tests/src/Loader/regressions/GitHub_15544/main.cs b/tests/src/Loader/regressions/GitHub_15544/main.cs
+new file mode 100644
+index 0000000..25e7d79
+--- /dev/null
++++ b/tests/src/Loader/regressions/GitHub_15544/main.cs
+@@ -0,0 +1,37 @@
++// Licensed to the .NET Foundation under one or more agreements.
++// The .NET Foundation licenses this file to you under the MIT license.
++// See the LICENSE file in the project root for more information.
++//
++
++using System;
++using System.IO;
++using System.Reflection;
++
++public class CMain{
++ public static int Main(String[] args) {
++ string tempFileName = Path.GetTempFileName();
++
++ bool isThrown = false;
++
++ try
++ {
++ AssemblyName.GetAssemblyName(tempFileName);
++ }
++ catch (BadImageFormatException)
++ {
++ isThrown = true;
++ }
++
++ File.Delete(tempFileName);
++
++ if (isThrown) {
++ Console.WriteLine("PASS");
++
++ return 100;
++ } else {
++ Console.WriteLine("FAIL");
++
++ return 101;
++ }
++ }
++}
+diff --git a/tests/src/Loader/regressions/GitHub_15544/main.csproj b/tests/src/Loader/regressions/GitHub_15544/main.csproj
+new file mode 100644
+index 0000000..e46a44c
+--- /dev/null
++++ b/tests/src/Loader/regressions/GitHub_15544/main.csproj
+@@ -0,0 +1,31 @@
++<?xml version="1.0" encoding="utf-8"?>
++<Project ToolsVersion="12.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
++ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.props))\dir.props" />
++ <PropertyGroup>
++ <Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
++ <Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
++ <SchemaVersion>2.0</SchemaVersion>
++ <ProjectGuid>{AC75380E-F196-4F32-9BCF-F0589AF864E6}</ProjectGuid>
++ <OutputType>Exe</OutputType>
++ <ProjectTypeGuids>{786C830F-07A1-408B-BD7F-6EE04809D6DB};{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}</ProjectTypeGuids>
++ <SolutionDir Condition="$(SolutionDir) == '' Or $(SolutionDir) == '*Undefined*'">..\..\</SolutionDir>
++ <AllowUnsafeBlocks>true</AllowUnsafeBlocks>
++ </PropertyGroup>
++ <!-- Default configurations to help VS understand the configurations -->
++ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
++ </PropertyGroup>
++ <PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
++ </PropertyGroup>
++ <ItemGroup>
++ <CodeAnalysisDependentAssemblyPaths Condition=" '$(VS100COMNTOOLS)' != '' " Include="$(VS100COMNTOOLS)..\IDE\PrivateAssemblies">
++ <Visible>False</Visible>
++ </CodeAnalysisDependentAssemblyPaths>
++ </ItemGroup>
++ <ItemGroup>
++ <Compile Include="main.cs" />
++ </ItemGroup>
++ <ItemGroup>
++ <Service Include="{82A7F48D-3B50-4B1E-B82E-3ADA8210C358}" />
++ </ItemGroup>
++ <Import Project="$([MSBuild]::GetDirectoryNameOfFileAbove($(MSBuildThisFileDirectory), dir.targets))\dir.targets" />
++</Project>
+--
+2.7.4
+
diff --git a/packaging/coreclr.spec b/packaging/coreclr.spec
index bd645251f8..f437299462 100644
--- a/packaging/coreclr.spec
+++ b/packaging/coreclr.spec
@@ -23,7 +23,7 @@ Source1000: downloaded_files.tar.gz
Source1001: %{name}.manifest
Source1002: libicu.tar.gz
Source1003: dep_libs.tar.gz
-# Gbp-Ignore-Patches: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45
+# Gbp-Ignore-Patches: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77
Patch0: 0001-Add-project.assets.json-files.patch
Patch1: 0001-ARM-Linux-Support-unaligned-struct-read-write-11290.patch
Patch2: 0002-x86-Linux-Thread-safe-UMThunkMarshInfo-RunTimeInit-1.patch
@@ -70,6 +70,38 @@ Patch42: 0001-Fix-build-with-Asan-15372.patch
Patch43: 0002-Fix-asan-false-positive-errors-15563.patch
Patch44: 0003-ThrowExceptionFromContextInternal-RtlCaptureContext-.patch
Patch45: 0004-ExecuteHandlerOnOriginalStack-handle-case-when-it-is.patch
+Patch46: 0001-Extract-PEImage-CreateLayoutMapped-and-PEImage-Creat.patch
+Patch47: 0002-Direct-mapping-of-IL-assembly-images-that-don-t-cont.patch
+Patch48: 0003-Delete-default-copy-move-constructors-and-assignment.patch
+Patch49: 0004-Change-relocations-in-ngen-ed-code-with-PC-relative-.patch
+Patch50: 0005-Allow-RelativePointer-SetValue-usage-for-non-DAC-bui.patch
+Patch51: 0006-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch
+Patch52: 0007-FIX-fix-No.1-missing-GetImplementedMDs.patch
+Patch53: 0008-Fix-issues-with-RelativePointer-instead-of-RelativeF.patch
+Patch54: 0009-Remove-relocations-from-SECTION_MethodDesc-for-ngene.patch
+Patch55: 0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch
+Patch56: 0011-FIX-fix-No.2-incorrect-m_pBeginInvokeMethod.patch
+Patch57: 0012-Replace-array-type-handle-with-method-table-in-argum.patch
+Patch58: 0013-Implement-JIT_NewArr1_R2R-as-R2R-wrapper-for-JIT_New.patch
+Patch59: 0014-Fix-JIT_NewArr1-8-byte-alignment-for-ELEMENT_TYPE_R8.patch
+Patch60: 0015-Partially-remove-relocations-from-Class-section-of-N.patch
+Patch61: 0016-Fix-copying-of-FieldMarshaler-structures-in-EEClassL.patch
+Patch62: 0017-Fix-alignment-of-reads-in-MD5Transform.-12800.patch
+Patch63: 0018-Simplify-SHM-allocator-12815.patch
+Patch64: 0019-Remove-relocations-from-SECTION_Readonly-for-fields-.patch
+Patch65: 0020-Add-FixupPlainOrRelativePointerField-for-MethodDesc-.patch
+Patch66: 0021-Additional-fixes-for-RelativePointer-FixupPointer-Re.patch
+Patch67: 0022-Remove-relocations-for-InterfaceInfo_t-m_pMethodTabl.patch
+Patch68: 0023-Remove-relocations-for-MethodTable-m_pWriteableData-.patch
+Patch69: 0024-Remove-relocations-for-MethodTable-m_pPerInstInfo-fo.patch
+Patch70: 0025-Remove-relocations-for-MethodTable-s-vtable-1st-leve.patch
+Patch71: 0026-Move-ITEM_DICTIONARY-and-ITEM_VTABLE_CHUNK-to-separa.patch
+Patch72: 0027-Update-GUID.patch
+Patch73: 0028-Review-fixes.patch
+Patch74: 0029-Allocate-FileMappingImmutableData-szFileName-and-CFi.patch
+Patch75: 0030-Remove-relocations-for-MethodTable-m_pParentMethodTa.patch
+Patch76: 0031-Fix-build-break-with-older-VS-versions-16522.patch
+Patch77: 0032-Fix-handling-of-incorrect-assemblies-on-Unix-16747.patch
ExcludeArch: aarch64
@@ -214,6 +246,38 @@ cp %{SOURCE1001} .
%patch43 -p1
%patch44 -p1
%patch45 -p1
+%patch46 -p1
+%patch47 -p1
+%patch48 -p1
+%patch49 -p1
+%patch50 -p1
+%patch51 -p1
+%patch52 -p1
+%patch53 -p1
+%patch54 -p1
+%patch55 -p1
+%patch56 -p1
+%patch57 -p1
+%patch58 -p1
+%patch59 -p1
+%patch60 -p1
+%patch61 -p1
+%patch62 -p1
+%patch63 -p1
+%patch64 -p1
+%patch65 -p1
+%patch66 -p1
+%patch67 -p1
+%patch68 -p1
+%patch69 -p1
+%patch70 -p1
+%patch71 -p1
+%patch72 -p1
+%patch73 -p1
+%patch74 -p1
+%patch75 -p1
+%patch76 -p1
+%patch77 -p1
%if 0%{skipmscorlib}
%else