Diffstat (limited to 'packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch')
-rw-r--r--   packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch   1759
1 file changed, 1759 insertions, 0 deletions
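Editor's note: the patch below replaces a number of absolute pointer fields (in EEClass, DelegateEEClass, EEClassOptionalFields, NgenHashTable and related structures) with RelativePointer fields, so the corresponding slots in the ModuleSection (ZapVirtualSectionType) of the NGen image no longer need base relocations. As a rough illustration of the idea only (a simplified sketch, not the actual CoreCLR RelativePointer class), a relative pointer stores a signed offset from its own address instead of an absolute address, which stays valid wherever the image is mapped:

    // Illustrative sketch only -- names and details are simplified and do not
    // match the real RelativePointer implementation in the CLR sources.
    #include <cstdint>

    template <typename T>
    class RelativePointerSketch
    {
    public:
        void SetValueMaybeNull(T *addr)
        {
            // NULL is encoded as a zero offset (a field never points at itself).
            m_delta = (addr == nullptr)
                          ? 0
                          : reinterpret_cast<intptr_t>(addr) - reinterpret_cast<intptr_t>(this);
        }

        T *GetValueMaybeNull() const
        {
            // Re-materialize the absolute address from the field's own location.
            return (m_delta == 0)
                       ? nullptr
                       : reinterpret_cast<T *>(reinterpret_cast<intptr_t>(this) + m_delta);
        }

        bool IsNull() const { return m_delta == 0; }

    private:
        intptr_t m_delta; // self-relative offset; needs no base relocation when the image is rebased
    };

Because every access has to recompute the address from the field's location, reads in the patch go through helpers such as ReadPointer/ReadPointerMaybeNull(this, &Class::m_field) and writes through SetValue/SetValueMaybeNull, rather than plain member accesses.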
diff --git a/packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch b/packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch
new file mode 100644
index 0000000000..62647ea3ae
--- /dev/null
+++ b/packaging/0010-Partially-remove-relocations-for-ModuleSection-ZapVi.patch
@@ -0,0 +1,1759 @@
+From 9fa388f63a78596afdbe59ecc792487f96576b6c Mon Sep 17 00:00:00 2001
+From: Ruben Ayrapetyan <ruben-ayrapetyan@users.noreply.github.com>
+Date: Tue, 13 Jun 2017 16:58:41 +0300
+Subject: [PATCH 10/32] Partially remove relocations for ModuleSection
+ (ZapVirtualSectionType). (#11853)
+
+---
+ src/debug/daccess/nidump.cpp | 26 +++---
+ src/vm/class.cpp | 14 ++--
+ src/vm/class.h | 67 ++++++++++-----
+ src/vm/class.inl | 2 +-
+ src/vm/classhash.cpp | 57 ++++++-------
+ src/vm/comcallablewrapper.cpp | 2 +-
+ src/vm/comdelegate.cpp | 38 ++++-----
+ src/vm/comsynchronizable.cpp | 2 +-
+ src/vm/instmethhash.cpp | 8 +-
+ src/vm/jitinterface.cpp | 4 +-
+ src/vm/methodtablebuilder.cpp | 14 ++--
+ src/vm/ngenhash.h | 114 ++++++++++++++++++++++---
+ src/vm/ngenhash.inl | 189 ++++++++++++++++++++++++------------------
+ src/vm/stubhelpers.cpp | 4 +-
+ src/vm/typedesc.cpp | 18 ++--
+ src/vm/typedesc.h | 7 +-
+ src/vm/typehash.cpp | 39 +++++----
+ src/vm/typehash.h | 16 +++-
+ 18 files changed, 397 insertions(+), 224 deletions(-)
+
+diff --git a/src/debug/daccess/nidump.cpp b/src/debug/daccess/nidump.cpp
+index d1e69f6..42705a5 100644
+--- a/src/debug/daccess/nidump.cpp
++++ b/src/debug/daccess/nidump.cpp
+@@ -4492,14 +4492,14 @@ void NativeImageDumper::TraverseNgenHash(DPTR(HASH_CLASS) pTable,
+ }
+
+ DisplayWriteFieldPointer(m_pModule,
+- DPtrToPreferredAddr(pTable->m_pModule),
++ DPtrToPreferredAddr(pTable->GetModule()),
+ HASH_CLASS, MODULE);
+
+ // Dump warm (volatile) entries.
+ DisplayWriteFieldUInt(m_cWarmEntries, pTable->m_cWarmEntries, HASH_CLASS, MODULE);
+ DisplayWriteFieldUInt(m_cWarmBuckets, pTable->m_cWarmBuckets, HASH_CLASS, MODULE);
+ DisplayWriteFieldAddress(m_pWarmBuckets,
+- DPtrToPreferredAddr(pTable->m_pWarmBuckets),
++ DPtrToPreferredAddr(pTable->GetWarmBuckets()),
+ sizeof(HASH_ENTRY_CLASS*) * pTable->m_cWarmBuckets,
+ HASH_CLASS, MODULE);
+
+@@ -4535,11 +4535,11 @@ void NativeImageDumper::TraverseNgenPersistedEntries(DPTR(HASH_CLASS) pTable,
+ DisplayWriteFieldUInt(m_cEntries, pEntries->m_cEntries, typename HASH_CLASS::PersistedEntries, MODULE);
+ DisplayWriteFieldUInt(m_cBuckets, pEntries->m_cBuckets, typename HASH_CLASS::PersistedEntries, MODULE);
+ DisplayWriteFieldAddress(m_pBuckets,
+- DPtrToPreferredAddr(pEntries->m_pBuckets),
+- pEntries->m_cBuckets ? pEntries->m_pBuckets->GetSize(pEntries->m_cBuckets) : 0,
++ DPtrToPreferredAddr(pTable->GetPersistedBuckets(pEntries)),
++ pEntries->m_cBuckets ? pTable->GetPersistedBuckets(pEntries)->GetSize(pEntries->m_cBuckets) : 0,
+ typename HASH_CLASS::PersistedEntries, MODULE);
+ DisplayWriteFieldAddress(m_pEntries,
+- DPtrToPreferredAddr(pEntries->m_pEntries),
++ DPtrToPreferredAddr(pTable->GetPersistedEntries(pEntries)),
+ sizeof(typename HASH_CLASS::PersistedEntry) * pEntries->m_cEntries,
+ typename HASH_CLASS::PersistedEntries, MODULE);
+
+@@ -4551,7 +4551,7 @@ void NativeImageDumper::TraverseNgenPersistedEntries(DPTR(HASH_CLASS) pTable,
+ {
+ // Get index of the first entry and the count of entries in the bucket.
+ DWORD dwEntryId, cEntries;
+- pEntries->m_pBuckets->GetBucket(i, &dwEntryId, &cEntries);
++ pTable->GetPersistedBuckets(pEntries)->GetBucket(i, &dwEntryId, &cEntries);
+
+ // Loop over entries.
+ while (cEntries && (CHECK_OPT(SLIM_MODULE_TBLS)
+@@ -4559,7 +4559,7 @@ void NativeImageDumper::TraverseNgenPersistedEntries(DPTR(HASH_CLASS) pTable,
+ || CHECK_OPT(METHODTABLES)))
+ {
+ // Lookup entry in the array via the index we have.
+- typename HASH_CLASS::PTR_PersistedEntry pEntry(PTR_TO_TADDR(pEntries->m_pEntries) +
++ typename HASH_CLASS::PTR_PersistedEntry pEntry(PTR_TO_TADDR(pTable->GetPersistedEntries(pEntries)) +
+ (dwEntryId * sizeof(typename HASH_CLASS::PersistedEntry)));
+
+ IF_OPT(SLIM_MODULE_TBLS)
+@@ -8297,7 +8297,7 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ EEClass, EECLASSES );
+ #endif
+
+- WriteFieldMethodTable( m_pMethodTable, clazz->m_pMethodTable, EEClass,
++ WriteFieldMethodTable( m_pMethodTable, clazz->GetMethodTable(), EEClass,
+ EECLASSES );
+
+ WriteFieldCorElementType( m_NormType, (CorElementType)clazz->m_NormType,
+@@ -8558,7 +8558,7 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ DelegateEEClass, EECLASSES );
+
+ WriteFieldMethodDesc( m_pInvokeMethod,
+- delegateClass->m_pInvokeMethod,
++ delegateClass->GetInvokeMethod(),
+ DelegateEEClass, EECLASSES );
+ DumpFieldStub( m_pMultiCastInvokeStub,
+ delegateClass->m_pMultiCastInvokeStub,
+@@ -8585,10 +8585,10 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ }
+
+ WriteFieldMethodDesc( m_pBeginInvokeMethod,
+- delegateClass->m_pBeginInvokeMethod,
++ delegateClass->GetBeginInvokeMethod(),
+ DelegateEEClass, EECLASSES );
+ WriteFieldMethodDesc( m_pEndInvokeMethod,
+- delegateClass->m_pEndInvokeMethod,
++ delegateClass->GetEndInvokeMethod(),
+ DelegateEEClass, EECLASSES );
+ DisplayWriteFieldPointer( m_pMarshalStub, delegateClass->m_pMarshalStub,
+ DelegateEEClass, EECLASSES );
+@@ -8717,7 +8717,7 @@ NativeImageDumper::DumpEEClassForMethodTable( PTR_MethodTable mt )
+ }
+ }
+ }
+- PTR_BYTE varianceInfo = TO_TADDR(pClassOptional->m_pVarianceInfo);
++ PTR_BYTE varianceInfo = pClassOptional->GetVarianceInfo();
+ if( varianceInfo == NULL )
+ {
+ DisplayWriteFieldPointer( m_pVarianceInfo, NULL,
+@@ -8879,7 +8879,7 @@ void NativeImageDumper::DumpTypeDesc( PTR_TypeDesc td )
+ PTR_TypeVarTypeDesc tvtd(td);
+ DisplayStartVStructure( "TypeVarTypeDesc", TYPEDESCS );
+ DisplayWriteFieldPointer( m_pModule,
+- DPtrToPreferredAddr(tvtd->m_pModule),
++ DPtrToPreferredAddr(tvtd->GetModule()),
+ TypeVarTypeDesc, TYPEDESCS );
+ DisplayWriteFieldUInt( m_typeOrMethodDef,
+ tvtd->m_typeOrMethodDef,
+diff --git a/src/vm/class.cpp b/src/vm/class.cpp
+index 2172090..0259b1e 100644
+--- a/src/vm/class.cpp
++++ b/src/vm/class.cpp
+@@ -2884,7 +2884,7 @@ void EEClass::Save(DataImage *image, MethodTable *pMT)
+ {
+ // make sure we don't store a GUID_NULL guid in the NGEN image
+ // instead we'll compute the GUID at runtime, and throw, if appropriate
+- m_pGuidInfo = NULL;
++ m_pGuidInfo.SetValueMaybeNull(NULL);
+ }
+ }
+ }
+@@ -2961,14 +2961,14 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+ }
+
+ if (HasOptionalFields())
+- image->FixupPointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pVarianceInfo));
++ image->FixupRelativePointerField(GetOptionalFields(), offsetof(EEClassOptionalFields, m_pVarianceInfo));
+
+ //
+ // We pass in the method table, because some classes (e.g. remoting proxy)
+ // have fake method tables set up in them & we want to restore the regular
+ // one.
+ //
+- image->FixupField(this, offsetof(EEClass, m_pMethodTable), pMT);
++ image->FixupField(this, offsetof(EEClass, m_pMethodTable), pMT, 0, IMAGE_REL_BASED_RelativePointer);
+
+ //
+ // Fixup MethodDescChunk and MethodDescs
+@@ -3043,9 +3043,9 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+ }
+ else if (IsDelegate())
+ {
+- image->FixupPointerField(this, offsetof(DelegateEEClass, m_pInvokeMethod));
+- image->FixupPointerField(this, offsetof(DelegateEEClass, m_pBeginInvokeMethod));
+- image->FixupPointerField(this, offsetof(DelegateEEClass, m_pEndInvokeMethod));
++ image->FixupRelativePointerField(this, offsetof(DelegateEEClass, m_pInvokeMethod));
++ image->FixupRelativePointerField(this, offsetof(DelegateEEClass, m_pBeginInvokeMethod));
++ image->FixupRelativePointerField(this, offsetof(DelegateEEClass, m_pEndInvokeMethod));
+
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pUMThunkMarshInfo));
+ image->ZeroPointerField(this, offsetof(DelegateEEClass, m_pStaticCallStub));
+@@ -3078,7 +3078,7 @@ void EEClass::Fixup(DataImage *image, MethodTable *pMT)
+ //
+
+ if (IsInterface() && GetGuidInfo() != NULL)
+- image->FixupPointerField(this, offsetof(EEClass, m_pGuidInfo));
++ image->FixupRelativePointerField(this, offsetof(EEClass, m_pGuidInfo));
+ else
+ image->ZeroPointerField(this, offsetof(EEClass, m_pGuidInfo));
+
+diff --git a/src/vm/class.h b/src/vm/class.h
+index 6358624..13b2e50 100644
+--- a/src/vm/class.h
++++ b/src/vm/class.h
+@@ -676,7 +676,7 @@ class EEClassOptionalFields
+
+ // Variance info for each type parameter (gpNonVariant, gpCovariant, or gpContravariant)
+ // If NULL, this type has no type parameters that are co/contravariant
+- BYTE* m_pVarianceInfo;
++ RelativePointer<PTR_BYTE> m_pVarianceInfo;
+
+ //
+ // COM RELATED FIELDS.
+@@ -717,6 +717,13 @@ class EEClassOptionalFields
+
+ // Set default values for optional fields.
+ inline void Init();
++
++ PTR_BYTE GetVarianceInfo()
++ {
++ LIMITED_METHOD_DAC_CONTRACT;
++
++ return ReadPointerMaybeNull(this, &EEClassOptionalFields::m_pVarianceInfo);
++ }
+ };
+ typedef DPTR(EEClassOptionalFields) PTR_EEClassOptionalFields;
+
+@@ -1014,12 +1021,12 @@ public:
+ // will return the method table pointer corresponding to the "canonical"
+ // instantiation, as defined in typehandle.h.
+ //
+- inline MethodTable* GetMethodTable()
++ inline PTR_MethodTable GetMethodTable()
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+
+- return m_pMethodTable;
++ return ReadPointerMaybeNull(this, &EEClass::m_pMethodTable);
+ }
+
+ // DO NOT ADD ANY ASSERTS TO THIS METHOD.
+@@ -1036,14 +1043,14 @@ public:
+ CANNOT_HAVE_CONTRACT;
+ SUPPORTS_DAC;
+
+- return m_pMethodTable;
++ return ReadPointerMaybeNull(this, &EEClass::m_pMethodTable);
+ }
+-#ifndef DACCESS_COMPILE
+
++#ifndef DACCESS_COMPILE
+ inline void SetMethodTable(MethodTable* pMT)
+ {
+ LIMITED_METHOD_CONTRACT;
+- m_pMethodTable = pMT;
++ m_pMethodTable.SetValueMaybeNull(pMT);
+ }
+ #endif // !DACCESS_COMPILE
+
+@@ -1710,14 +1717,15 @@ public:
+ inline PTR_GuidInfo GetGuidInfo()
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+- return m_pGuidInfo;
++
++ return ReadPointerMaybeNull(this, &EEClass::m_pGuidInfo);
+ }
+
+ inline void SetGuidInfo(GuidInfo* pGuidInfo)
+ {
+ WRAPPER_NO_CONTRACT;
+ #ifndef DACCESS_COMPILE
+- *EnsureWritablePages(&m_pGuidInfo) = pGuidInfo;
++ EnsureWritablePages(&m_pGuidInfo)->SetValueMaybeNull(pGuidInfo);
+ #endif // DACCESS_COMPILE
+ }
+
+@@ -1879,6 +1887,7 @@ public:
+ GetOptionalFields()->m_pDictLayout = pLayout;
+ }
+
++#ifndef DACCESS_COMPILE
+ static CorGenericParamAttr GetVarianceOfTypeParameter(BYTE * pbVarianceInfo, DWORD i)
+ {
+ LIMITED_METHOD_CONTRACT;
+@@ -1897,15 +1906,16 @@ public:
+ BYTE* GetVarianceInfo()
+ {
+ LIMITED_METHOD_CONTRACT;
+- return HasOptionalFields() ? GetOptionalFields()->m_pVarianceInfo : NULL;
++ return HasOptionalFields() ? GetOptionalFields()->GetVarianceInfo() : NULL;
+ }
+
+ void SetVarianceInfo(BYTE *pVarianceInfo)
+ {
+ LIMITED_METHOD_CONTRACT;
+ _ASSERTE(HasOptionalFields());
+- GetOptionalFields()->m_pVarianceInfo = pVarianceInfo;
++ GetOptionalFields()->m_pVarianceInfo.SetValueMaybeNull(pVarianceInfo);
+ }
++#endif // !DACCESS_COMPILE
+
+ // Check that a signature blob uses type parameters correctly
+ // in accordance with the variance annotations specified by this class
+@@ -2145,7 +2155,7 @@ public:
+ // C_ASSERTs in Jitinterface.cpp need this to be public to check the offset.
+ // Put it first so the offset rarely changes, which just reduces the number of times we have to fiddle
+ // with the offset.
+- PTR_GuidInfo m_pGuidInfo; // The cached guid inforation for interfaces.
++ RelativePointer<PTR_GuidInfo> m_pGuidInfo; // The cached guid information for interfaces.
+
+ #ifdef _DEBUG
+ public:
+@@ -2159,7 +2169,7 @@ private:
+ RelativePointer<PTR_EEClassOptionalFields> m_rpOptionalFields;
+
+ // TODO: Remove this field. It is only used by SOS and object validation for stress.
+- PTR_MethodTable m_pMethodTable;
++ RelativePointer<PTR_MethodTable> m_pMethodTable;
+
+ RelativePointer<PTR_FieldDesc> m_pFieldDescList;
+ RelativePointer<PTR_MethodDescChunk> m_pChunks;
+@@ -2355,15 +2365,15 @@ struct ComPlusCallInfo;
+ class DelegateEEClass : public EEClass
+ {
+ public:
+- PTR_Stub m_pStaticCallStub;
+- PTR_Stub m_pInstRetBuffCallStub;
+- PTR_MethodDesc m_pInvokeMethod;
+- PTR_Stub m_pMultiCastInvokeStub;
+- PTR_Stub m_pSecureDelegateInvokeStub;
+- UMThunkMarshInfo* m_pUMThunkMarshInfo;
+- PTR_MethodDesc m_pBeginInvokeMethod;
+- PTR_MethodDesc m_pEndInvokeMethod;
+- Volatile<PCODE> m_pMarshalStub;
++ PTR_Stub m_pStaticCallStub;
++ PTR_Stub m_pInstRetBuffCallStub;
++ RelativePointer<PTR_MethodDesc> m_pInvokeMethod;
++ PTR_Stub m_pMultiCastInvokeStub;
++ PTR_Stub m_pSecureDelegateInvokeStub;
++ UMThunkMarshInfo* m_pUMThunkMarshInfo;
++ RelativePointer<PTR_MethodDesc> m_pBeginInvokeMethod;
++ RelativePointer<PTR_MethodDesc> m_pEndInvokeMethod;
++ Volatile<PCODE> m_pMarshalStub;
+
+ #ifdef FEATURE_COMINTEROP
+ ComPlusCallInfo *m_pComPlusCallInfo;
+@@ -2376,6 +2386,21 @@ public:
+ MethodDesc* m_pForwardStubMD; // marshaling stub for calls to unmanaged code
+ MethodDesc* m_pReverseStubMD; // marshaling stub for calls from unmanaged code
+
++ PTR_MethodDesc GetInvokeMethod()
++ {
++ return ReadPointer(this, &DelegateEEClass::m_pInvokeMethod);
++ }
++
++ PTR_MethodDesc GetBeginInvokeMethod()
++ {
++ return ReadPointer(this, &DelegateEEClass::m_pBeginInvokeMethod);
++ }
++
++ PTR_MethodDesc GetEndInvokeMethod()
++ {
++ return ReadPointer(this, &DelegateEEClass::m_pEndInvokeMethod);
++ }
++
+ #ifndef DACCESS_COMPILE
+ DelegateEEClass() : EEClass(sizeof(DelegateEEClass))
+ {
+diff --git a/src/vm/class.inl b/src/vm/class.inl
+index 1a7e169..755463d 100644
+--- a/src/vm/class.inl
++++ b/src/vm/class.inl
+@@ -39,7 +39,7 @@ inline void EEClassOptionalFields::Init()
+ {
+ LIMITED_METHOD_CONTRACT;
+ m_pDictLayout = NULL;
+- m_pVarianceInfo = NULL;
++ m_pVarianceInfo.SetValueMaybeNull(NULL);
+ #ifdef FEATURE_COMINTEROP
+ m_pSparseVTableMap = NULL;
+ m_pCoClassForIntf = TypeHandle();
+diff --git a/src/vm/classhash.cpp b/src/vm/classhash.cpp
+index 2ffc612..31c7e84 100644
+--- a/src/vm/classhash.cpp
++++ b/src/vm/classhash.cpp
+@@ -145,7 +145,7 @@ VOID EEClassHashTable::UncompressModuleAndNonExportClassDef(HashDatum Data, Modu
+ _ASSERTE(!(dwData & EECLASSHASH_MDEXPORT_DISCR));
+
+ *pCL = ((dwData >> 1) & 0x00ffffff) | mdtTypeDef;
+- *ppModule = m_pModule;
++ *ppModule = GetModule();
+ }
+
+ bool EEClassHashTable::UncompressModuleAndClassDef(HashDatum Data, Loader::LoadFlag loadFlag,
+@@ -172,8 +172,7 @@ bool EEClassHashTable::UncompressModuleAndClassDef(HashDatum Data, Loader::LoadF
+ if(dwData & EECLASSHASH_MDEXPORT_DISCR) {
+ *pmdFoundExportedType = ((dwData >> 1) & 0x00ffffff) | mdtExportedType;
+
+- *ppModule = m_pModule->GetAssembly()->
+- FindModuleByExportedType(*pmdFoundExportedType, loadFlag, mdTypeDefNil, pCL);
++ *ppModule = GetModule()->GetAssembly()->FindModuleByExportedType(*pmdFoundExportedType, loadFlag, mdTypeDefNil, pCL);
+ }
+ else {
+ UncompressModuleAndNonExportClassDef(Data, ppModule, pCL);
+@@ -232,7 +231,7 @@ VOID EEClassHashTable::ConstructKeyFromData(PTR_EEClassHashEntry pEntry, // IN
+ // in this case, the lifetime of Key is bounded by the lifetime of cqb, which will free the memory
+ // it allocated on destruction.
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ LPSTR pszName = NULL;
+ LPSTR pszNameSpace = NULL;
+ IMDInternalImport *pInternalImport = NULL;
+@@ -259,7 +258,7 @@ VOID EEClassHashTable::ConstructKeyFromData(PTR_EEClassHashEntry pEntry, // IN
+ mdToken mdtUncompressed = UncompressModuleAndClassDef(Data);
+ if (TypeFromToken(mdtUncompressed) == mdtExportedType)
+ {
+- IfFailThrow(m_pModule->GetClassLoader()->GetAssembly()->GetManifestImport()->GetExportedTypeProps(
++ IfFailThrow(GetModule()->GetClassLoader()->GetAssembly()->GetManifestImport()->GetExportedTypeProps(
+ mdtUncompressed,
+ (LPCSTR *)&pszNameSpace,
+ (LPCSTR *)&pszName,
+@@ -355,7 +354,7 @@ EEClassHashEntry_t *EEClassHashTable::InsertValue(LPCUTF8 pszNamespace, LPCUTF8
+
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ EEClassHashEntry *pEntry = BaseAllocateEntry(pamTracker);
+
+@@ -433,10 +432,9 @@ EEClassHashEntry_t *EEClassHashTable::InsertValueIfNotFound(LPCUTF8 pszNamespace
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+- _ASSERTE(m_pModule);
+
+ EEClassHashEntry_t * pNewEntry = FindItem(pszNamespace, pszClassName, IsNested, NULL);
+
+@@ -479,7 +477,7 @@ EEClassHashEntry_t *EEClassHashTable::FindItem(LPCUTF8 pszNamespace, LPCUTF8 psz
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE(pszNamespace != NULL);
+ _ASSERTE(pszClassName != NULL);
+
+@@ -533,7 +531,7 @@ EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(NameHandle* pName, PTR
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE(pName);
+
+ if (pName->GetNameSpace())
+@@ -564,7 +562,7 @@ EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(LPCUTF8 pszNamespace,
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ PTR_EEClassHashEntry pSearch = BaseFindNextEntryByHash(pContext);
+
+@@ -597,7 +595,7 @@ EEClassHashEntry_t *EEClassHashTable::FindNextNestedClass(LPCUTF8 pszFullyQualif
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ CQuickBytes szNamespace;
+
+@@ -639,7 +637,7 @@ EEClassHashEntry_t * EEClassHashTable::GetValue(LPCUTF8 pszFullyQualifiedName, P
+ }
+ CONTRACTL_END;
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+
+ CQuickBytes szNamespace;
+
+@@ -685,7 +683,7 @@ EEClassHashEntry_t * EEClassHashTable::GetValue(LPCUTF8 pszNamespace, LPCUTF8 ps
+ CONTRACTL_END;
+
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ EEClassHashEntry_t *pItem = FindItem(pszNamespace, pszClassName, IsNested, pContext);
+ if (pItem)
+ *pData = pItem->GetData();
+@@ -709,7 +707,7 @@ EEClassHashEntry_t * EEClassHashTable::GetValue(NameHandle* pName, PTR_VOID *pDa
+
+
+ _ASSERTE(pName);
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ if(pName->GetNameSpace() == NULL) {
+ return GetValue(pName->GetName(), pData, IsNested, pContext);
+ }
+@@ -753,7 +751,7 @@ BOOL EEClassHashTable::CompareKeys(PTR_EEClassHashEntry pEntry, LPCUTF8 * pKey2)
+ CONTRACTL_END;
+
+
+- _ASSERTE(m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
+ _ASSERTE (pEntry);
+ _ASSERTE (pKey2);
+
+@@ -778,7 +776,7 @@ void EEClassHashTable::Save(DataImage *image, CorProfileData *profileData)
+ STANDARD_VM_CONTRACT;
+
+ // See comment on PrepareExportedTypesForSaving for what's going on here.
+- if (m_pModule->IsManifest())
++ if (GetModule()->IsManifest())
+ PrepareExportedTypesForSaving(image);
+
+ // The base class handles most of the saving logic (it controls the layout of the hash memory). It will
+@@ -876,11 +874,11 @@ void EEClassHashTable::PrepareExportedTypesForSaving(DataImage *image)
+ THROWS;
+ GC_TRIGGERS;
+ PRECONDITION(GetAppDomain()->IsCompilationDomain());
+- PRECONDITION(m_pModule->IsManifest());
++ PRECONDITION(GetModule()->IsManifest());
+ }
+ CONTRACTL_END
+
+- IMDInternalImport *pImport = m_pModule->GetMDImport();
++ IMDInternalImport *pImport = GetModule()->GetMDImport();
+
+ HENUMInternalHolder phEnum(pImport);
+ phEnum.EnumInit(mdtExportedType, mdTokenNil);
+@@ -900,7 +898,7 @@ void EEClassHashTable::PrepareExportedTypesForSaving(DataImage *image)
+ &typeDef,
+ &dwFlags)))
+ {
+- THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, m_pModule);
++ THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, GetModule());
+ continue;
+ }
+
+@@ -936,16 +934,19 @@ void EEClassHashTable::PrepareExportedTypesForSaving(DataImage *image)
+ // "CompareNestedEntryWithExportedType" will check if "pEntry->pEncloser" is a type of "mdImpl",
+ // as well as walking up the enclosing chain.
+ _ASSERTE (TypeFromToken(mdImpl) == mdtExportedType);
+- while ((!m_pModule->GetClassLoader()->CompareNestedEntryWithExportedType(pImport,
+- mdImpl,
+- this,
+- pEntry->GetEncloser())) &&
+- (pEntry = FindNextNestedClass(pszNameSpace, pszName, &data, &sContext)) != NULL);
++ while ((!GetModule()->GetClassLoader()->CompareNestedEntryWithExportedType(pImport,
++ mdImpl,
++ this,
++ pEntry->GetEncloser()))
++ && (pEntry = FindNextNestedClass(pszNameSpace, pszName, &data, &sContext)) != NULL)
++ {
++ ;
++ }
+ }
+ }
+
+ if (!pEntry) {
+- THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, m_pModule);
++ THROW_BAD_FORMAT(BFA_NOFIND_EXPORTED_TYPE, GetModule());
+ continue;
+ }
+
+@@ -1057,8 +1058,8 @@ EEClassHashTable *EEClassHashTable::MakeCaseInsensitiveTable(Module *pModule, Al
+
+
+
+- _ASSERTE(m_pModule);
+- _ASSERTE (pModule == m_pModule);
++ _ASSERTE(!m_pModule.IsNull());
++ _ASSERTE(pModule == GetModule());
+
+ // Allocate the table and verify that we actually got one.
+ EEClassHashTable * pCaseInsTable = EEClassHashTable::Create(pModule,
+diff --git a/src/vm/comcallablewrapper.cpp b/src/vm/comcallablewrapper.cpp
+index 540c708..719d5ba 100644
+--- a/src/vm/comcallablewrapper.cpp
++++ b/src/vm/comcallablewrapper.cpp
+@@ -5044,7 +5044,7 @@ void ComMethodTable::LayOutDelegateMethodTable()
+
+ // Some space for a CALL xx xx xx xx stub is reserved before the beginning of the MethodDesc
+ ComCallMethodDescHolder NewMDHolder = (ComCallMethodDesc *) (pMethodDescMemory + COMMETHOD_PREPAD);
+- MethodDesc* pInvokeMD = ((DelegateEEClass *)(pDelegateMT->GetClass()))->m_pInvokeMethod;
++ MethodDesc* pInvokeMD = ((DelegateEEClass *)(pDelegateMT->GetClass()))->GetInvokeMethod();
+
+ if (pInvokeMD->IsSharedByGenericInstantiations())
+ {
+diff --git a/src/vm/comdelegate.cpp b/src/vm/comdelegate.cpp
+index 2682c2d..e920a4a 100644
+--- a/src/vm/comdelegate.cpp
++++ b/src/vm/comdelegate.cpp
+@@ -581,7 +581,7 @@ ComPlusCallInfo * COMDelegate::PopulateComPlusCallInfo(MethodTable * pDelMT)
+ // We need a LoaderHeap that lives at least as long as the DelegateEEClass, but ideally no longer
+ LoaderHeap *DelegateEEClass::GetStubHeap()
+ {
+- return m_pInvokeMethod->GetLoaderAllocator()->GetStubHeap();
++ return GetInvokeMethod()->GetLoaderAllocator()->GetStubHeap();
+ }
+
+
+@@ -600,7 +600,7 @@ Stub* COMDelegate::SetupShuffleThunk(MethodTable * pDelMT, MethodDesc *pTargetMe
+
+ DelegateEEClass * pClass = (DelegateEEClass *)pDelMT->GetClass();
+
+- MethodDesc *pMD = pClass->m_pInvokeMethod;
++ MethodDesc *pMD = pClass->GetInvokeMethod();
+
+ StackSArray<ShuffleEntry> rShuffleEntryArray;
+ GenerateShuffleArray(pMD, pTargetMeth, &rShuffleEntryArray);
+@@ -2385,7 +2385,7 @@ PCODE COMDelegate::GetInvokeMethodStub(EEImplMethodDesc* pMD)
+ MethodTable * pDelMT = pMD->GetMethodTable();
+ DelegateEEClass* pClass = (DelegateEEClass*) pDelMT->GetClass();
+
+- if (pMD == pClass->m_pInvokeMethod)
++ if (pMD == pClass->GetInvokeMethod())
+ {
+ // Validate the invoke method, which at the moment just means checking the calling convention
+
+@@ -2401,7 +2401,7 @@ PCODE COMDelegate::GetInvokeMethodStub(EEImplMethodDesc* pMD)
+ // and not an invalid-delegate-layout condition.
+ //
+ // If the call was indeed for async delegate invocation, we will just throw an exception.
+- if ((pMD == pClass->m_pBeginInvokeMethod) || (pMD == pClass->m_pEndInvokeMethod))
++ if ((pMD == pClass->GetBeginInvokeMethod()) || (pMD == pClass->GetEndInvokeMethod()))
+ {
+ COMPlusThrow(kPlatformNotSupportedException);
+ }
+@@ -2525,7 +2525,7 @@ DELEGATEREF COMDelegate::CreateSecureDelegate(DELEGATEREF delegate, MethodDesc*
+ CONTRACTL_END;
+
+ MethodTable *pDelegateType = delegate->GetMethodTable();
+- MethodDesc *pMD = ((DelegateEEClass*)(pDelegateType->GetClass()))->m_pInvokeMethod;
++ MethodDesc *pMD = ((DelegateEEClass*)(pDelegateType->GetClass()))->GetInvokeMethod();
+ // allocate the object
+ struct _gc {
+ DELEGATEREF refSecDel;
+@@ -2625,7 +2625,7 @@ FCIMPL1(MethodDesc*, COMDelegate::GetInvokeMethod, Object* refThisIn)
+ OBJECTREF refThis = ObjectToOBJECTREF(refThisIn);
+ MethodTable * pDelMT = refThis->GetMethodTable();
+
+- MethodDesc* pMD = ((DelegateEEClass*)(pDelMT->GetClass()))->m_pInvokeMethod;
++ MethodDesc* pMD = ((DelegateEEClass*)(pDelMT->GetClass()))->GetInvokeMethod();
+ _ASSERTE(pMD);
+ return pMD;
+ }
+@@ -2643,7 +2643,7 @@ FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
+ Stub *pStub = delegateEEClass->m_pMultiCastInvokeStub;
+ if (pStub == NULL)
+ {
+- MethodDesc* pMD = delegateEEClass->m_pInvokeMethod;
++ MethodDesc* pMD = delegateEEClass->GetInvokeMethod();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+@@ -2767,7 +2767,7 @@ FCIMPL1(PCODE, COMDelegate::GetMulticastInvoke, Object* refThisIn)
+ Stub *pStub = delegateEEClass->m_pMultiCastInvokeStub;
+ if (pStub == NULL)
+ {
+- MethodDesc* pMD = delegateEEClass->m_pInvokeMethod;
++ MethodDesc* pMD = delegateEEClass->GetInvokeMethod();
+
+ HELPER_METHOD_FRAME_BEGIN_RET_0();
+
+@@ -3101,7 +3101,7 @@ MethodDesc* COMDelegate::FindDelegateInvokeMethod(MethodTable *pMT)
+
+ _ASSERTE(pMT->IsDelegate());
+
+- MethodDesc * pMD = ((DelegateEEClass*)pMT->GetClass())->m_pInvokeMethod;
++ MethodDesc * pMD = ((DelegateEEClass*)pMT->GetClass())->GetInvokeMethod();
+ if (pMD == NULL)
+ COMPlusThrowNonLocalized(kMissingMethodException, W("Invoke"));
+ return pMD;
+@@ -3114,7 +3114,7 @@ BOOL COMDelegate::IsDelegateInvokeMethod(MethodDesc *pMD)
+ MethodTable *pMT = pMD->GetMethodTable();
+ _ASSERTE(pMT->IsDelegate());
+
+- return (pMD == ((DelegateEEClass *)pMT->GetClass())->m_pInvokeMethod);
++ return (pMD == ((DelegateEEClass *)pMT->GetClass())->GetInvokeMethod());
+ }
+
+ BOOL COMDelegate::IsMethodDescCompatible(TypeHandle thFirstArg,
+@@ -3667,7 +3667,7 @@ BOOL COMDelegate::ValidateCtor(TypeHandle instHnd,
+
+ DelegateEEClass *pdlgEEClass = (DelegateEEClass*)dlgtHnd.AsMethodTable()->GetClass();
+ PREFIX_ASSUME(pdlgEEClass != NULL);
+- MethodDesc *pDlgtInvoke = pdlgEEClass->m_pInvokeMethod;
++ MethodDesc *pDlgtInvoke = pdlgEEClass->GetInvokeMethod();
+ if (pDlgtInvoke == NULL)
+ return FALSE;
+ return IsMethodDescCompatible(instHnd, ftnParentHnd, pFtn, dlgtHnd, pDlgtInvoke, DBF_RelaxedSignature, pfIsOpenDelegate);
+@@ -3716,18 +3716,18 @@ BOOL COMDelegate::ValidateBeginInvoke(DelegateEEClass* pClass)
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pClass));
+- PRECONDITION(CheckPointer(pClass->m_pBeginInvokeMethod));
++ PRECONDITION(CheckPointer(pClass->GetBeginInvokeMethod()));
+
+ // insert fault. Can the binder throw an OOM?
+ }
+ CONTRACTL_END;
+
+- if (pClass->m_pInvokeMethod == NULL)
++ if (pClass->GetInvokeMethod() == NULL)
+ return FALSE;
+
+ // We check the signatures under the typical instantiation of the possibly generic class
+- MetaSig beginInvokeSig(pClass->m_pBeginInvokeMethod->LoadTypicalMethodDefinition());
+- MetaSig invokeSig(pClass->m_pInvokeMethod->LoadTypicalMethodDefinition());
++ MetaSig beginInvokeSig(pClass->GetBeginInvokeMethod()->LoadTypicalMethodDefinition());
++ MetaSig invokeSig(pClass->GetInvokeMethod()->LoadTypicalMethodDefinition());
+
+ if (beginInvokeSig.GetCallingConventionInfo() != (IMAGE_CEE_CS_CALLCONV_HASTHIS | IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ return FALSE;
+@@ -3768,18 +3768,18 @@ BOOL COMDelegate::ValidateEndInvoke(DelegateEEClass* pClass)
+ MODE_ANY;
+
+ PRECONDITION(CheckPointer(pClass));
+- PRECONDITION(CheckPointer(pClass->m_pEndInvokeMethod));
++ PRECONDITION(CheckPointer(pClass->GetEndInvokeMethod()));
+
+ // insert fault. Can the binder throw an OOM?
+ }
+ CONTRACTL_END;
+
+- if (pClass->m_pInvokeMethod == NULL)
++ if (pClass->GetInvokeMethod() == NULL)
+ return FALSE;
+
+ // We check the signatures under the typical instantiation of the possibly generic class
+- MetaSig endInvokeSig(pClass->m_pEndInvokeMethod->LoadTypicalMethodDefinition());
+- MetaSig invokeSig(pClass->m_pInvokeMethod->LoadTypicalMethodDefinition());
++ MetaSig endInvokeSig(pClass->GetEndInvokeMethod()->LoadTypicalMethodDefinition());
++ MetaSig invokeSig(pClass->GetInvokeMethod()->LoadTypicalMethodDefinition());
+
+ if (endInvokeSig.GetCallingConventionInfo() != (IMAGE_CEE_CS_CALLCONV_HASTHIS | IMAGE_CEE_CS_CALLCONV_DEFAULT))
+ return FALSE;
+diff --git a/src/vm/comsynchronizable.cpp b/src/vm/comsynchronizable.cpp
+index 08b5281..01ba496 100644
+--- a/src/vm/comsynchronizable.cpp
++++ b/src/vm/comsynchronizable.cpp
+@@ -235,7 +235,7 @@ void ThreadNative::KickOffThread_Worker(LPVOID ptr)
+ delete args->share;
+ args->share = 0;
+
+- MethodDesc *pMeth = ((DelegateEEClass*)( gc.orDelegate->GetMethodTable()->GetClass() ))->m_pInvokeMethod;
++ MethodDesc *pMeth = ((DelegateEEClass*)( gc.orDelegate->GetMethodTable()->GetClass() ))->GetInvokeMethod();
+ _ASSERTE(pMeth);
+ MethodDescCallSite invokeMethod(pMeth, &gc.orDelegate);
+
+diff --git a/src/vm/instmethhash.cpp b/src/vm/instmethhash.cpp
+index 250a6d5..560e955 100644
+--- a/src/vm/instmethhash.cpp
++++ b/src/vm/instmethhash.cpp
+@@ -86,8 +86,8 @@ PTR_LoaderAllocator InstMethodHashTable::GetLoaderAllocator()
+ }
+ else
+ {
+- _ASSERTE(m_pModule != NULL);
+- return m_pModule->GetLoaderAllocator();
++ _ASSERTE(!m_pModule.IsNull());
++ return GetModule()->GetLoaderAllocator();
+ }
+ }
+
+@@ -188,7 +188,7 @@ MethodDesc* InstMethodHashTable::FindMethodDesc(TypeHandle declaringType,
+ RelativeFixupPointer<PTR_MethodTable> * ppMT = pMD->GetMethodTablePtr();
+ TADDR pMT = ppMT->GetValueMaybeTagged((TADDR)ppMT);
+
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, pMT, declaringType))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), pMT, declaringType))
+ {
+ continue; // Next iteration of the for loop
+ }
+@@ -208,7 +208,7 @@ MethodDesc* InstMethodHashTable::FindMethodDesc(TypeHandle declaringType,
+ // asserts on encoded fixups.
+ TADDR candidateArg = ((FixupPointer<TADDR> *)candidateInst.GetRawArgs())[i].GetValue();
+
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, inst[i]))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), candidateArg, inst[i]))
+ {
+ match = false;
+ break;
+diff --git a/src/vm/jitinterface.cpp b/src/vm/jitinterface.cpp
+index 08965a7..84f635a 100644
+--- a/src/vm/jitinterface.cpp
++++ b/src/vm/jitinterface.cpp
+@@ -5286,7 +5286,7 @@ void CEEInfo::getCallInfo(
+ // 2) Delegate.Invoke() - since a Delegate is a sealed class as per ECMA spec
+ // 3) JIT intrinsics - since they have pre-defined behavior
+ devirt = pTargetMD->GetMethodTable()->IsValueType() ||
+- (pTargetMD->GetMethodTable()->IsDelegate() && ((DelegateEEClass*)(pTargetMD->GetMethodTable()->GetClass()))->m_pInvokeMethod == pMD) ||
++ (pTargetMD->GetMethodTable()->IsDelegate() && ((DelegateEEClass*)(pTargetMD->GetMethodTable()->GetClass()))->GetInvokeMethod() == pMD) ||
+ (pTargetMD->IsFCall() && ECall::GetIntrinsicID(pTargetMD) != CORINFO_INTRINSIC_Illegal);
+
+ callVirtCrossingVersionBubble = true;
+@@ -6705,7 +6705,7 @@ DWORD CEEInfo::getMethodAttribsInternal (CORINFO_METHOD_HANDLE ftn)
+ result |= CORINFO_FLG_FORCEINLINE;
+ }
+
+- if (pMT->IsDelegate() && ((DelegateEEClass*)(pMT->GetClass()))->m_pInvokeMethod == pMD)
++ if (pMT->IsDelegate() && ((DelegateEEClass*)(pMT->GetClass()))->GetInvokeMethod() == pMD)
+ {
+ // This is now used to emit efficient invoke code for any delegate invoke,
+ // including multicast.
+diff --git a/src/vm/methodtablebuilder.cpp b/src/vm/methodtablebuilder.cpp
+index fdf4f48..05ab438 100644
+--- a/src/vm/methodtablebuilder.cpp
++++ b/src/vm/methodtablebuilder.cpp
+@@ -6156,18 +6156,18 @@ MethodTableBuilder::InitMethodDesc(
+
+ if (strcmp(pMethodName, "Invoke") == 0)
+ {
+- BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod);
+- ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod = pNewMD;
++ BAD_FORMAT_NOTHROW_ASSERT(((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod.IsNull());
++ ((DelegateEEClass*)GetHalfBakedClass())->m_pInvokeMethod.SetValue(pNewMD);
+ }
+ else if (strcmp(pMethodName, "BeginInvoke") == 0)
+ {
+- BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod);
+- ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod = pNewMD;
++ BAD_FORMAT_NOTHROW_ASSERT(((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod.IsNull());
++ ((DelegateEEClass*)GetHalfBakedClass())->m_pBeginInvokeMethod.SetValue(pNewMD);
+ }
+ else if (strcmp(pMethodName, "EndInvoke") == 0)
+ {
+- BAD_FORMAT_NOTHROW_ASSERT(NULL == ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod);
+- ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod = pNewMD;
++ BAD_FORMAT_NOTHROW_ASSERT(((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod.IsNull());
++ ((DelegateEEClass*)GetHalfBakedClass())->m_pEndInvokeMethod.SetValue(pNewMD);
+ }
+ else
+ {
+@@ -10332,7 +10332,7 @@ MethodTableBuilder::SetupMethodTable2(
+ GetMemTracker());
+
+ pMT->SetClass(pClass);
+- pClass->m_pMethodTable = pMT;
++ pClass->m_pMethodTable.SetValue(pMT);
+ m_pHalfBakedMT = pMT;
+
+ #ifdef _DEBUG
+diff --git a/src/vm/ngenhash.h b/src/vm/ngenhash.h
+index 667a55e..c59eb8e 100644
+--- a/src/vm/ngenhash.h
++++ b/src/vm/ngenhash.h
+@@ -203,7 +203,7 @@ protected:
+ private:
+ friend class NgenHashTable<NGEN_HASH_ARGS>;
+
+- NgenHashTable<NGEN_HASH_ARGS> *m_pTable; // Pointer back to the table being enumerated.
++ DPTR(NgenHashTable<NGEN_HASH_ARGS>) m_pTable; // Pointer back to the table being enumerated.
+ TADDR m_pEntry; // The entry the caller is currently looking at (or
+ // NULL to begin with). This is a VolatileEntry* or
+ // PersistedEntry* (depending on m_eType below) and
+@@ -303,8 +303,13 @@ protected:
+ void BaseEnumMemoryRegions(CLRDataEnumMemoryFlags flags);
+ #endif // DACCESS_COMPILE
+
++ PTR_Module GetModule()
++ {
++ return ReadPointerMaybeNull(this, &NgenHashTable<NGEN_HASH_ARGS>::m_pModule);
++ }
++
+ // Owning module set at hash creation time (possibly NULL if this hash instance is not to be ngen'd).
+- PTR_Module m_pModule;
++ RelativePointer<PTR_Module> m_pModule;
+
+ private:
+ // Internal implementation details. Nothing of interest to sub-classers for here on.
+@@ -385,13 +390,13 @@ private:
+ // because this logic is replicated for Hot and Cold entries so we can factor some common code.
+ struct PersistedEntries
+ {
+- APTR_PersistedEntry m_pEntries; // Pointer to a contiguous block of PersistedEntry structures
+- // (NULL if zero entries)
+- PTR_PersistedBucketList m_pBuckets; // Pointer to abstracted bucket list mapping above entries
+- // into a hash (NULL if zero buckets, which is iff zero
+- // entries)
+- DWORD m_cEntries; // Count of entries in the above block
+- DWORD m_cBuckets; // Count of buckets in the above bucket list
++ RelativePointer<APTR_PersistedEntry> m_pEntries; // Pointer to a contiguous block of PersistedEntry structures
++ // (NULL if zero entries)
++ RelativePointer<PTR_PersistedBucketList> m_pBuckets; // Pointer to abstracted bucket list mapping above entries
++ // into a hash (NULL if zero buckets, which is iff zero
++ // entries)
++ DWORD m_cEntries; // Count of entries in the above block
++ DWORD m_cBuckets; // Count of buckets in the above bucket list
+ };
+ #endif // FEATURE_PREJIT
+
+@@ -439,13 +444,98 @@ private:
+ DWORD NextLargestPrime(DWORD dwNumber);
+ #endif // !DACCESS_COMPILE
+
++ DPTR(PTR_VolatileEntry) GetWarmBuckets()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointer(this, &NgenHashTable<NGEN_HASH_ARGS>::m_pWarmBuckets);
++ }
++
++#ifdef FEATURE_PREJIT
++ APTR_PersistedEntry GetPersistedHotEntries()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries)::m_pEntries);
++ }
++
++ PTR_PersistedBucketList GetPersistedHotBuckets()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sHotEntries)::m_pBuckets);
++ }
++
++ APTR_PersistedEntry GetPersistedColdEntries()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries)::m_pEntries);
++ }
++
++ PTR_PersistedBucketList GetPersistedColdBuckets()
++ {
++ SUPPORTS_DAC;
++
++ return ReadPointerMaybeNull(this,
++ &NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries,
++ &decltype(NgenHashTable<NGEN_HASH_ARGS>::m_sColdEntries)::m_pBuckets);
++ }
++
++#ifdef DACCESS_COMPILE
++ APTR_PersistedEntry GetPersistedEntries(DPTR(PersistedEntries) pEntries)
++ {
++ SUPPORTS_DAC;
++
++ TADDR hotEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries);
++ TADDR coldEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries);
++
++ if (hotEntriesAddr == dac_cast<TADDR>(pEntries))
++ {
++ return GetPersistedHotEntries();
++ }
++ else
++ {
++            _ASSERTE(coldEntriesAddr == dac_cast<TADDR>(pEntries));
++
++ return GetPersistedColdEntries();
++ }
++ }
++
++ PTR_PersistedBucketList GetPersistedBuckets(DPTR(PersistedEntries) pEntries)
++ {
++ SUPPORTS_DAC;
++
++ TADDR hotEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries);
++ TADDR coldEntriesAddr = dac_cast<TADDR>(this) + offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries);
++
++ if (hotEntriesAddr == dac_cast<TADDR>(pEntries))
++ {
++ return GetPersistedHotBuckets();
++ }
++ else
++ {
++            _ASSERTE(coldEntriesAddr == dac_cast<TADDR>(pEntries));
++
++ return GetPersistedColdBuckets();
++ }
++ }
++#endif // DACCESS_COMPILE
++#endif // FEATURE_PREJIT
++
+ // Loader heap provided at construction time. May be NULL (in which case m_pModule must *not* be NULL).
+ LoaderHeap *m_pHeap;
+
+ // Fields related to the runtime (volatile or warm) part of the hash.
+- DPTR(PTR_VolatileEntry) m_pWarmBuckets; // Pointer to a simple bucket list (array of VolatileEntry pointers)
+- DWORD m_cWarmBuckets; // Count of buckets in the above array (always non-zero)
+- DWORD m_cWarmEntries; // Count of elements in the warm section of the hash
++ RelativePointer<DPTR(PTR_VolatileEntry)> m_pWarmBuckets; // Pointer to a simple bucket list (array of VolatileEntry pointers)
++ DWORD m_cWarmBuckets; // Count of buckets in the above array (always non-zero)
++ DWORD m_cWarmEntries; // Count of elements in the warm section of the hash
+
+ #ifdef FEATURE_PREJIT
+ PersistedEntries m_sHotEntries; // Hot persisted hash entries (if any)
+diff --git a/src/vm/ngenhash.inl b/src/vm/ngenhash.inl
+index 070b1da..6e55345 100644
+--- a/src/vm/ngenhash.inl
++++ b/src/vm/ngenhash.inl
+@@ -48,14 +48,14 @@ NgenHashTable<NGEN_HASH_ARGS>::NgenHashTable(Module *pModule, LoaderHeap *pHeap,
+ // At least one of module or heap must have been specified or we won't know how to allocate entries and
+ // buckets.
+ _ASSERTE(pModule || pHeap);
+- m_pModule = pModule;
++ m_pModule.SetValueMaybeNull(pModule);
+ m_pHeap = pHeap;
+
+ S_SIZE_T cbBuckets = S_SIZE_T(sizeof(VolatileEntry*)) * S_SIZE_T(cInitialBuckets);
+
+ m_cWarmEntries = 0;
+ m_cWarmBuckets = cInitialBuckets;
+- m_pWarmBuckets = (PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets);
++ m_pWarmBuckets.SetValue((PTR_VolatileEntry*)(void*)GetHeap()->AllocMem(cbBuckets));
+
+ // Note: Memory allocated on loader heap is zero filled
+ // memset(m_pWarmBuckets, 0, sizeof(VolatileEntry*) * cInitialBuckets);
+@@ -83,7 +83,7 @@ VALUE *NgenHashTable<NGEN_HASH_ARGS>::BaseAllocateEntry(AllocMemTracker *pamTrac
+
+ // Faults are forbidden in BaseInsertEntry. Make the table writeable now that the faults are still allowed.
+ EnsureWritablePages(this);
+- EnsureWritablePages(this->m_pWarmBuckets, m_cWarmBuckets * sizeof(PTR_VolatileEntry));
++ EnsureWritablePages(this->GetWarmBuckets(), m_cWarmBuckets * sizeof(PTR_VolatileEntry));
+
+ TaggedMemAllocPtr pMemory = GetHeap()->AllocMem(S_SIZE_T(sizeof(VolatileEntry)));
+
+@@ -119,8 +119,8 @@ LoaderHeap *NgenHashTable<NGEN_HASH_ARGS>::GetHeap()
+
+ // If not specified then we fall back to the owning module's heap (a module must have been specified in
+ // this case).
+- _ASSERTE(m_pModule != NULL);
+- return m_pModule->GetAssembly()->GetLowFrequencyHeap();
++ _ASSERTE(!m_pModule.IsNull());
++ return GetModule()->GetAssembly()->GetLowFrequencyHeap();
+ }
+
+ // Insert an entry previously allocated via BaseAllocateEntry (you cannot allocated entries in any other
+@@ -154,13 +154,13 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseInsertEntry(NgenHashValue iHash, VALUE *
+ DWORD dwBucket = iHash % m_cWarmBuckets;
+
+ // Prepare to link the new entry at the head of the bucket chain.
+- pVolatileEntry->m_pNextEntry = m_pWarmBuckets[dwBucket];
++ pVolatileEntry->m_pNextEntry = (GetWarmBuckets())[dwBucket];
+
+ // Make sure that all writes to the entry are visible before publishing the entry.
+ MemoryBarrier();
+
+ // Publish the entry by pointing the bucket at it.
+- m_pWarmBuckets[dwBucket] = pVolatileEntry;
++ (GetWarmBuckets())[dwBucket] = pVolatileEntry;
+
+ m_cWarmEntries++;
+
+@@ -205,7 +205,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
+ // again.
+ for (DWORD i = 0; i < m_cWarmBuckets; i++)
+ {
+- PTR_VolatileEntry pEntry = m_pWarmBuckets[i];
++ PTR_VolatileEntry pEntry = (GetWarmBuckets())[i];
+
+ // Try to lock out readers from scanning this bucket. This is obviously a race which may fail.
+ // However, note that it's OK if somebody is already in the list - it's OK if we mess with the bucket
+@@ -213,7 +213,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
+ // comparison even if it wanders aimlessly amongst entries while we are rearranging things. If a
+ // lookup finds a match under those circumstances, great. If not, they will have to acquire the lock &
+ // try again anyway.
+- m_pWarmBuckets[i] = NULL;
++ (GetWarmBuckets())[i] = NULL;
+
+ while (pEntry != NULL)
+ {
+@@ -229,7 +229,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::GrowTable()
+
+ // Make sure that all writes are visible before publishing the new array.
+ MemoryBarrier();
+- m_pWarmBuckets = pNewBuckets;
++ m_pWarmBuckets.SetValue(pNewBuckets);
+
+ // The new number of buckets has to be published last (prior to this readers may miscalculate a bucket
+ // index, but the result will always be in range and they'll simply walk the wrong chain and get a miss,
+@@ -697,7 +697,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ // Persisted hashes had better have supplied an owning module at creation time (otherwise we won't know
+ // how to find a loader heap for further allocations at runtime: we don't know how to serialize a loader
+ // heap pointer).
+- _ASSERTE(m_pModule != NULL);
++ _ASSERTE(!m_pModule.IsNull());
+
+ // We can only save once during ngen so the hot and cold sections of the hash cannot have been populated
+ // yet.
+@@ -732,7 +732,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ for (i = 0; i < m_cWarmBuckets; i++)
+ {
+ // Iterate through the chain of warm entries for this bucket.
+- VolatileEntry *pOldEntry = m_pWarmBuckets[i];
++ VolatileEntry *pOldEntry = (GetWarmBuckets())[i];
+ while (pOldEntry)
+ {
+ // Is the current entry being saved into the image?
+@@ -842,18 +842,18 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ {
+ m_sHotEntries.m_cEntries = cHotEntries;
+ m_sHotEntries.m_cBuckets = cHotBuckets;
+- m_sHotEntries.m_pEntries = new PersistedEntry[cHotEntries];
+- m_sHotEntries.m_pBuckets = PersistedBucketList::CreateList(cHotBuckets, cHotEntries, cMaxHotChain);
+- memset(m_sHotEntries.m_pEntries, 0, cHotEntries * sizeof(PersistedEntry)); // NGen determinism
++ m_sHotEntries.m_pEntries.SetValue(new PersistedEntry[cHotEntries]);
++ m_sHotEntries.m_pBuckets.SetValue(PersistedBucketList::CreateList(cHotBuckets, cHotEntries, cMaxHotChain));
++ memset(GetPersistedHotEntries(), 0, cHotEntries * sizeof(PersistedEntry)); // NGen determinism
+ }
+
+ if (cColdEntries)
+ {
+ m_sColdEntries.m_cEntries = cColdEntries;
+ m_sColdEntries.m_cBuckets = cColdBuckets;
+- m_sColdEntries.m_pEntries = new PersistedEntry[cColdEntries];
+- m_sColdEntries.m_pBuckets = PersistedBucketList::CreateList(cColdBuckets, cColdEntries, cMaxColdChain);
+- memset(m_sColdEntries.m_pEntries, 0, cColdEntries * sizeof(PersistedEntry)); // NGen determinism
++ m_sColdEntries.m_pEntries.SetValue(new PersistedEntry[cColdEntries]);
++ m_sColdEntries.m_pBuckets.SetValue(PersistedBucketList::CreateList(cColdBuckets, cColdEntries, cMaxColdChain));
++ memset(GetPersistedColdEntries(), 0, cColdEntries * sizeof(PersistedEntry)); // NGen determinism
+ }
+
+ //
+@@ -871,7 +871,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ DWORD dwNextId = 0; // This represents the index of the next entry to start a bucket chain
+ for (i = 0; i < cHotBuckets; i++)
+ {
+- m_sHotEntries.m_pBuckets->SetBucket(i, dwNextId, pHotBucketSizes[i]);
++ m_sHotEntries.m_pBuckets.GetValue()->SetBucket(i, dwNextId, pHotBucketSizes[i]);
+ dwNextId += pHotBucketSizes[i];
+ }
+ _ASSERTE(dwNextId == m_sHotEntries.m_cEntries);
+@@ -879,7 +879,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ dwNextId = 0; // Reset index for the cold entries (remember they have their own table of entries)
+ for (i = 0; i < cColdBuckets; i++)
+ {
+- m_sColdEntries.m_pBuckets->SetBucket(i, dwNextId, pColdBucketSizes[i]);
++ m_sColdEntries.m_pBuckets.GetValue()->SetBucket(i, dwNextId, pColdBucketSizes[i]);
+ dwNextId += pColdBucketSizes[i];
+ }
+ _ASSERTE(dwNextId == m_sColdEntries.m_cEntries);
+@@ -897,15 +897,16 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ typename EntryMappingTable::Entry *pMapEntry = &sEntryMap.m_pEntries[i];
+
+ // Entry block depends on whether this entry is hot or cold.
+- PersistedEntries *pEntries = pMapEntry->m_fHot ? &m_sHotEntries : &m_sColdEntries;
++ APTR_PersistedEntry pPersistedEntries = pMapEntry->m_fHot ? GetPersistedHotEntries() : GetPersistedColdEntries();
++ PTR_PersistedBucketList pPersistedBucketsList = pMapEntry->m_fHot ? GetPersistedHotBuckets() : GetPersistedColdBuckets();
+
+ // We already know the new bucket this entry will go into. Retrieve the index of the first entry in
+ // that bucket chain.
+- DWORD dwBaseChainIndex = pEntries->m_pBuckets->GetInitialEntry(pMapEntry->m_dwNewBucket);
++ DWORD dwBaseChainIndex = pPersistedBucketsList->GetInitialEntry(pMapEntry->m_dwNewBucket);
+
+ // This entry will be located at some offset from the index above (we calculated this ordinal in phase
+ // 2).
+- PersistedEntry *pNewEntry = &pEntries->m_pEntries[dwBaseChainIndex + pMapEntry->m_dwChainOrdinal];
++ PersistedEntry *pNewEntry = &pPersistedEntries[dwBaseChainIndex + pMapEntry->m_dwChainOrdinal];
+
+ // Record the address of the embedded sub-class hash entry in the map entry (sub-classes will use this
+ // info to map old entry addresses to their new locations).
+@@ -931,7 +932,11 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+
+ bool fAllEntriesImmutable = true;
+ for (i = 0; i < sEntryMap.m_cEntries; i++)
+- if (!DOWNCALL(SaveEntry)(pImage, pProfileData, sEntryMap.m_pEntries[i].m_pOldEntry, sEntryMap.m_pEntries[i].m_pNewEntry, &sEntryMap))
++ if (!DOWNCALL(SaveEntry)(pImage,
++ pProfileData,
++ sEntryMap.m_pEntries[i].m_pOldEntry,
++ sEntryMap.m_pEntries[i].m_pNewEntry,
++ &sEntryMap))
+ fAllEntriesImmutable = false;
+
+ // We're mostly done. Now just some cleanup and the actual DataImage storage operations.
+@@ -943,24 +948,24 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ // If there are any hot entries store the entry array and bucket list.
+ if (cHotEntries)
+ {
+- pImage->StoreStructure(m_sHotEntries.m_pEntries,
++ pImage->StoreStructure(GetPersistedHotEntries(),
+ static_cast<ULONG>(sizeof(PersistedEntry) * cHotEntries),
+ fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_HOT : DataImage::ITEM_NGEN_HASH_ENTRIES_HOT);
+
+- pImage->StoreStructure(m_sHotEntries.m_pBuckets,
+- static_cast<ULONG>(m_sHotEntries.m_pBuckets->GetSize(m_sHotEntries.m_cBuckets)),
++ pImage->StoreStructure(GetPersistedHotBuckets(),
++ static_cast<ULONG>(m_sHotEntries.m_pBuckets.GetValue()->GetSize(m_sHotEntries.m_cBuckets)),
+ DataImage::ITEM_NGEN_HASH_BUCKETLIST_HOT);
+ }
+
+ // If there are any cold entries store the entry array and bucket list.
+ if (cColdEntries)
+ {
+- pImage->StoreStructure(m_sColdEntries.m_pEntries,
++ pImage->StoreStructure(GetPersistedColdEntries(),
+ static_cast<ULONG>(sizeof(PersistedEntry) * cColdEntries),
+ fAllEntriesImmutable ? DataImage::ITEM_NGEN_HASH_ENTRIES_RO_COLD : DataImage::ITEM_NGEN_HASH_ENTRIES_COLD);
+
+- pImage->StoreStructure(m_sColdEntries.m_pBuckets,
+- static_cast<ULONG>(m_sColdEntries.m_pBuckets->GetSize(m_sColdEntries.m_cBuckets)),
++ pImage->StoreStructure(GetPersistedColdBuckets(),
++ static_cast<ULONG>(GetPersistedColdBuckets()->GetSize(m_sColdEntries.m_cBuckets)),
+ DataImage::ITEM_NGEN_HASH_BUCKETLIST_COLD);
+ }
+
+@@ -987,7 +992,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ DWORD cNewWarmBuckets = min(m_cInitialBuckets, 11);
+
+ // Create the ngen version of the warm buckets.
+- pImage->StoreStructure(m_pWarmBuckets,
++ pImage->StoreStructure(GetWarmBuckets(),
+ cNewWarmBuckets * sizeof(VolatileEntry*),
+ DataImage::ITEM_NGEN_HASH_HOT);
+
+@@ -997,7 +1002,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseSave(DataImage *pImage, CorProfileData *
+ pNewTable->m_cWarmBuckets = cNewWarmBuckets;
+
+ // Zero-out the ngen version of the warm buckets.
+- VolatileEntry *pNewBuckets = (VolatileEntry*)pImage->GetImagePointer(m_pWarmBuckets);
++ VolatileEntry *pNewBuckets = (VolatileEntry*)pImage->GetImagePointer(GetWarmBuckets());
+ memset(pNewBuckets, 0, cNewWarmBuckets * sizeof(VolatileEntry*));
+ }
+
+@@ -1011,7 +1016,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseFixup(DataImage *pImage)
+ DWORD i;
+
+ // Fixup the module pointer.
+- pImage->FixupPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pModule));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pModule));
+
+ // Throw away the heap pointer, we can't serialize it into the image. We'll rely on the loader heap
+ // associated with the module above at runtime.
+@@ -1023,29 +1028,27 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseFixup(DataImage *pImage)
+ // be relative to the base of this array.
+
+ for (i = 0; i < m_sHotEntries.m_cEntries; i++)
+- DOWNCALL(FixupEntry)(pImage, &m_sHotEntries.m_pEntries[i].m_sValue, m_sHotEntries.m_pEntries, i * sizeof(PersistedEntry));
++ DOWNCALL(FixupEntry)(pImage,
++ &(GetPersistedHotEntries())[i].m_sValue,
++ GetPersistedHotEntries(),
++ i * sizeof(PersistedEntry));
+
+ for (i = 0; i < m_sColdEntries.m_cEntries; i++)
+- DOWNCALL(FixupEntry)(pImage, &m_sColdEntries.m_pEntries[i].m_sValue, m_sColdEntries.m_pEntries, i * sizeof(PersistedEntry));
++ DOWNCALL(FixupEntry)(pImage,
++ &(GetPersistedColdEntries())[i].m_sValue,
++ GetPersistedColdEntries(),
++ i * sizeof(PersistedEntry));
+
+ // Fixup the warm (empty) bucket list.
+- pImage->FixupPointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pWarmBuckets));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_pWarmBuckets));
+
+ // Fixup the hot entry array and bucket list.
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) +
+- offsetof(PersistedEntries, m_pEntries));
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) +
+- offsetof(PersistedEntries, m_pBuckets));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) + offsetof(PersistedEntries, m_pEntries));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sHotEntries) + offsetof(PersistedEntries, m_pBuckets));
+
+ // Fixup the cold entry array and bucket list.
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) +
+- offsetof(PersistedEntries, m_pEntries));
+- pImage->FixupPointerField(this,
+- offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) +
+- offsetof(PersistedEntries, m_pBuckets));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) + offsetof(PersistedEntries, m_pEntries));
++ pImage->FixupRelativePointerField(this, offsetof(NgenHashTable<NGEN_HASH_ARGS>, m_sColdEntries) + offsetof(PersistedEntries, m_pBuckets));
+ }
+ #endif // !DACCESS_COMPILE
+ #endif // FEATURE_PREJIT
+@@ -1064,14 +1067,14 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseEnumMemoryRegions(CLRDataEnumMemoryFlags
+ DacEnumMemoryRegion(dac_cast<TADDR>(this), sizeof(FINAL_CLASS));
+
+ // Save the warm bucket list.
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_pWarmBuckets), m_cWarmBuckets * sizeof(VolatileEntry*));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetWarmBuckets()), m_cWarmBuckets * sizeof(VolatileEntry*));
+
+ // Save all the warm entries.
+- if (m_pWarmBuckets.IsValid())
++ if (GetWarmBuckets().IsValid())
+ {
+ for (DWORD i = 0; i < m_cWarmBuckets; i++)
+ {
+- PTR_VolatileEntry pEntry = m_pWarmBuckets[i];
++ PTR_VolatileEntry pEntry = (GetWarmBuckets())[i];
+ while (pEntry.IsValid())
+ {
+ pEntry.EnumMem();
+@@ -1088,25 +1091,35 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseEnumMemoryRegions(CLRDataEnumMemoryFlags
+ // Save hot buckets and entries.
+ if (m_sHotEntries.m_cEntries > 0)
+ {
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sHotEntries.m_pEntries), m_sHotEntries.m_cEntries * sizeof(PersistedEntry));
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sHotEntries.m_pBuckets), m_sHotEntries.m_pBuckets->GetSize(m_sHotEntries.m_cBuckets));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedHotEntries()),
++ m_sHotEntries.m_cEntries * sizeof(PersistedEntry));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedHotBuckets()),
++ GetPersistedHotBuckets()->GetSize(m_sHotEntries.m_cBuckets));
+ for (DWORD i = 0; i < m_sHotEntries.m_cEntries; i++)
+- DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(&m_sHotEntries.m_pEntries[i])), flags);
++ {
++ PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedHotEntries())[i]);
++ DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(pEntry), flags);
++ }
+ }
+
+ // Save cold buckets and entries.
+ if (m_sColdEntries.m_cEntries > 0)
+ {
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sColdEntries.m_pEntries), m_sColdEntries.m_cEntries * sizeof(PersistedEntry));
+- DacEnumMemoryRegion(dac_cast<TADDR>(m_sColdEntries.m_pBuckets), m_sColdEntries.m_pBuckets->GetSize(m_sColdEntries.m_cBuckets));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedColdEntries()),
++ m_sColdEntries.m_cEntries * sizeof(PersistedEntry));
++ DacEnumMemoryRegion(dac_cast<TADDR>(GetPersistedColdBuckets()),
++ GetPersistedColdBuckets()->GetSize(m_sColdEntries.m_cBuckets));
+ for (DWORD i = 0; i < m_sColdEntries.m_cEntries; i++)
+- DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(dac_cast<PTR_PersistedEntry>(&m_sColdEntries.m_pEntries[i])), flags);
++ {
++ PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedColdEntries())[i]);
++ DOWNCALL(EnumMemoryRegionsForEntry)(VALUE_FROM_PERSISTED_ENTRY(pEntry), flags);
++ }
+ }
+ #endif // FEATURE_PREJIT
+
+ // Save the module if present.
+- if (m_pModule.IsValid())
+- m_pModule->EnumMemoryRegions(flags, true);
++ if (GetModule().IsValid())
++ GetModule()->EnumMemoryRegions(flags, true);
+ }
+ #endif // DACCESS_COMPILE
+
+@@ -1136,13 +1149,31 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindPersistedEntryByHash(PersistedEnt
+ // Since there is at least one entry there must be at least one bucket.
+ _ASSERTE(pEntries->m_cBuckets > 0);
+
++ DWORD eType = (pEntries == &m_sHotEntries ? Hot : Cold);
++
+ // Get the first entry and count of entries for the bucket which contains all entries with the given hash
+ // code.
+ DWORD dwEntryIndex, cEntriesLeft;
+- pEntries->m_pBuckets->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft);
++ if (eType == Hot)
++ {
++ GetPersistedHotBuckets()->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft);
++ }
++ else
++ {
++ GetPersistedColdBuckets()->GetBucket(iHash % pEntries->m_cBuckets, &dwEntryIndex, &cEntriesLeft);
++ }
+
+ // Determine the address of the first entry in the chain by indexing into the entry array.
+- PTR_PersistedEntry pEntry = dac_cast<PTR_PersistedEntry>(&pEntries->m_pEntries[dwEntryIndex]);
++ PTR_PersistedEntry pEntry;
++
++ if (eType == Hot)
++ {
++ pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedHotEntries())[dwEntryIndex]);
++ }
++ else
++ {
++ pEntry = dac_cast<PTR_PersistedEntry>(&(GetPersistedColdEntries())[dwEntryIndex]);
++ }
+
+ // Iterate while we've still got entries left to check in this chain.
+ while (cEntriesLeft--)
+@@ -1154,7 +1185,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindPersistedEntryByHash(PersistedEnt
+ // Record our current search state into the provided context so that a subsequent call to
+ // BaseFindNextEntryByHash can pick up the search where it left off.
+ pContext->m_pEntry = dac_cast<TADDR>(pEntry);
+- pContext->m_eType = pEntries == &m_sHotEntries ? Hot : Cold;
++ pContext->m_eType = eType;
+ pContext->m_cRemainingEntries = cEntriesLeft;
+
+ // Return the address of the sub-class's embedded entry structure.
+@@ -1223,7 +1254,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::FindVolatileEntryByHash(NgenHashValue
+ _ASSERTE(m_cWarmBuckets > 0);
+
+ // Point at the first entry in the bucket chain which would contain any entries with the given hash code.
+- PTR_VolatileEntry pEntry = m_pWarmBuckets[iHash % m_cWarmBuckets];
++ PTR_VolatileEntry pEntry = (GetWarmBuckets())[iHash % m_cWarmBuckets];
+
+ // Walk the bucket chain one entry at a time.
+ while (pEntry)
+@@ -1257,7 +1288,7 @@ void NgenHashTable<NGEN_HASH_ARGS>::BaseInitIterator(BaseIterator *pIterator)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+- pIterator->m_pTable = this;
++ pIterator->m_pTable = dac_cast<DPTR(NgenHashTable<NGEN_HASH_ARGS>)>(this);
+ pIterator->m_pEntry = NULL;
+ #ifdef FEATURE_PREJIT
+ pIterator->m_eType = Hot;
+@@ -1299,7 +1330,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next()
+ if (m_pEntry == NULL)
+ {
+ // This is our first lookup in the hot section, return the first entry in the hot array.
+- m_pEntry = dac_cast<TADDR>(m_pTable->m_sHotEntries.m_pEntries);
++ m_pEntry = dac_cast<TADDR>(m_pTable->GetPersistedHotEntries());
+ }
+ else
+ {
+@@ -1329,7 +1360,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next()
+ {
+ // This is our first lookup in the warm section for a particular bucket, return the first
+ // entry in that bucket.
+- m_pEntry = dac_cast<TADDR>(m_pTable->m_pWarmBuckets[m_dwBucket]);
++ m_pEntry = dac_cast<TADDR>((m_pTable->GetWarmBuckets())[m_dwBucket]);
+ }
+ else
+ {
+@@ -1370,7 +1401,7 @@ DPTR(VALUE) NgenHashTable<NGEN_HASH_ARGS>::BaseIterator::Next()
+ if (m_pEntry == NULL)
+ {
+ // This is our first lookup in the cold section, return the first entry in the cold array.
+- m_pEntry = dac_cast<TADDR>(m_pTable->m_sColdEntries.m_pEntries);
++ m_pEntry = dac_cast<TADDR>(m_pTable->GetPersistedColdEntries());
+ }
+ else
+ {
+@@ -1463,17 +1494,17 @@ void NgenHashEntryRef<NGEN_HASH_ARGS>::Fixup(DataImage *pImage, NgenHashTable<NG
+ BYTE *pLocationBase;
+ DWORD cbLocationOffset;
+
+- if (pLocation >= (BYTE*)pTable->m_sHotEntries.m_pEntries &&
+- pLocation < (BYTE*)(pTable->m_sHotEntries.m_pEntries + pTable->m_sHotEntries.m_cEntries))
++ if (pLocation >= (BYTE*)pTable->GetPersistedHotEntries() &&
++ pLocation < (BYTE*)(pTable->GetPersistedHotEntries() + pTable->m_sHotEntries.m_cEntries))
+ {
+ // The field is in a hot entry.
+- pLocationBase = (BYTE*)pTable->m_sHotEntries.m_pEntries;
++ pLocationBase = (BYTE*)pTable->GetPersistedHotEntries();
+ }
+- else if (pLocation >= (BYTE*)pTable->m_sColdEntries.m_pEntries &&
+- pLocation < (BYTE*)(pTable->m_sColdEntries.m_pEntries + pTable->m_sColdEntries.m_cEntries))
++ else if (pLocation >= (BYTE*)pTable->GetPersistedColdEntries() &&
++ pLocation < (BYTE*)(pTable->GetPersistedColdEntries() + pTable->m_sColdEntries.m_cEntries))
+ {
+ // The field is in a cold entry.
+- pLocationBase = (BYTE*)pTable->m_sColdEntries.m_pEntries;
++ pLocationBase = (BYTE*)pTable->GetPersistedColdEntries();
+ }
+ else
+ {
+@@ -1490,17 +1521,17 @@ void NgenHashEntryRef<NGEN_HASH_ARGS>::Fixup(DataImage *pImage, NgenHashTable<NG
+ BYTE *pTargetBase;
+ DWORD cbTargetOffset;
+
+- if (pTarget >= (BYTE*)pTable->m_sHotEntries.m_pEntries &&
+- pTarget < (BYTE*)(pTable->m_sHotEntries.m_pEntries + pTable->m_sHotEntries.m_cEntries))
++ if (pTarget >= (BYTE*)pTable->GetPersistedHotEntries() &&
++ pTarget < (BYTE*)(pTable->GetPersistedHotEntries() + pTable->m_sHotEntries.m_cEntries))
+ {
+ // The target is a hot entry.
+- pTargetBase = (BYTE*)pTable->m_sHotEntries.m_pEntries;
++ pTargetBase = (BYTE*)pTable->GetPersistedHotEntries();
+ }
+- else if (pTarget >= (BYTE*)pTable->m_sColdEntries.m_pEntries &&
+- pTarget < (BYTE*)(pTable->m_sColdEntries.m_pEntries + pTable->m_sColdEntries.m_cEntries))
++ else if (pTarget >= (BYTE*)pTable->GetPersistedColdEntries() &&
++ pTarget < (BYTE*)(pTable->GetPersistedColdEntries() + pTable->m_sColdEntries.m_cEntries))
+ {
+ // The target is a cold entry.
+- pTargetBase = (BYTE*)pTable->m_sColdEntries.m_pEntries;
++ pTargetBase = (BYTE*)pTable->GetPersistedColdEntries();
+ }
+ else
+ {
+diff --git a/src/vm/stubhelpers.cpp b/src/vm/stubhelpers.cpp
+index db593c6..36c6d43 100644
+--- a/src/vm/stubhelpers.cpp
++++ b/src/vm/stubhelpers.cpp
+@@ -96,7 +96,7 @@ MethodDesc *StubHelpers::ResolveInteropMethod(Object *pThisUNSAFE, MethodDesc *p
+ MethodTable *pMT = pThisUNSAFE->GetMethodTable();
+
+ _ASSERTE(pMT->IsDelegate());
+- return ((DelegateEEClass *)pMT->GetClass())->m_pInvokeMethod;
++ return ((DelegateEEClass *)pMT->GetClass())->GetInvokeMethod();
+ }
+ return pMD;
+ }
+@@ -1551,7 +1551,7 @@ FCIMPL3(SIZE_T, StubHelpers::ProfilerBeginTransitionCallback, SIZE_T pSecretPara
+ _ASSERTE(pMT->IsDelegate());
+
+ EEClass * pClass = pMT->GetClass();
+- pRealMD = ((DelegateEEClass*)pClass)->m_pInvokeMethod;
++ pRealMD = ((DelegateEEClass*)pClass)->GetInvokeMethod();
+ _ASSERTE(pRealMD);
+ }
+ }
+diff --git a/src/vm/typedesc.cpp b/src/vm/typedesc.cpp
+index 06170cb..6718068 100644
+--- a/src/vm/typedesc.cpp
++++ b/src/vm/typedesc.cpp
+@@ -1373,7 +1373,7 @@ void TypeVarTypeDesc::Fixup(DataImage *image)
+ STANDARD_VM_CONTRACT;
+
+ LOG((LF_ZAP, LL_INFO10000, " TypeVarTypeDesc::Fixup %x (%p)\n", GetToken(), this));
+- image->FixupPointerField(this, offsetof(TypeVarTypeDesc, m_pModule));
++ image->FixupRelativePointerField(this, offsetof(TypeVarTypeDesc, m_pModule));
+ image->ZeroField(this, offsetof(TypeVarTypeDesc, m_hExposedClassObject), sizeof(m_hExposedClassObject));
+
+ // We don't persist the constraints: instead, load them back on demand
+@@ -1394,10 +1394,10 @@ MethodDesc * TypeVarTypeDesc::LoadOwnerMethod()
+ }
+ CONTRACTL_END;
+
+- MethodDesc *pMD = m_pModule->LookupMethodDef(m_typeOrMethodDef);
++ MethodDesc *pMD = GetModule()->LookupMethodDef(m_typeOrMethodDef);
+ if (pMD == NULL)
+ {
+- pMD = MemberLoader::GetMethodDescFromMethodDef(m_pModule, m_typeOrMethodDef, FALSE);
++ pMD = MemberLoader::GetMethodDescFromMethodDef(GetModule(), m_typeOrMethodDef, FALSE);
+ }
+ return pMD;
+ }
+@@ -1414,10 +1414,10 @@ TypeHandle TypeVarTypeDesc::LoadOwnerType()
+ }
+ CONTRACTL_END;
+
+- TypeHandle genericType = m_pModule->LookupTypeDef(m_typeOrMethodDef);
++ TypeHandle genericType = GetModule()->LookupTypeDef(m_typeOrMethodDef);
+ if (genericType.IsNull())
+ {
+- genericType = ClassLoader::LoadTypeDefThrowing(m_pModule, m_typeOrMethodDef,
++ genericType = ClassLoader::LoadTypeDefThrowing(GetModule(), m_typeOrMethodDef,
+ ClassLoader::ThrowIfNotFound,
+ ClassLoader::PermitUninstDefOrRef);
+ }
+@@ -1506,7 +1506,7 @@ void TypeVarTypeDesc::LoadConstraints(ClassLoadLevel level /* = CLASS_LOADED */)
+ numConstraints = pInternalImport->EnumGetCount(&hEnum);
+ if (numConstraints != 0)
+ {
+- LoaderAllocator* pAllocator=m_pModule->GetLoaderAllocator();
++ LoaderAllocator* pAllocator = GetModule()->GetLoaderAllocator();
+ // If there is a single class constraint we put it in element 0 of the array
+ AllocMemHolder<TypeHandle> constraints
+ (pAllocator->GetLowFrequencyHeap()->AllocMem(S_SIZE_T(numConstraints) * S_SIZE_T(sizeof(TypeHandle))));
+@@ -2434,9 +2434,11 @@ TypeVarTypeDesc::EnumMemoryRegions(CLRDataEnumMemoryFlags flags)
+ SUPPORTS_DAC;
+ DAC_ENUM_DTHIS();
+
+- if (m_pModule.IsValid())
++ PTR_TypeVarTypeDesc ptrThis(this);
++
++ if (GetModule().IsValid())
+ {
+- m_pModule->EnumMemoryRegions(flags, true);
++ GetModule()->EnumMemoryRegions(flags, true);
+ }
+
+ if (m_numConstraints != (DWORD)-1)
+diff --git a/src/vm/typedesc.h b/src/vm/typedesc.h
+index 4bc4978..a8b1c25 100644
+--- a/src/vm/typedesc.h
++++ b/src/vm/typedesc.h
+@@ -462,7 +462,7 @@ public:
+ }
+ CONTRACTL_END;
+
+- m_pModule = pModule;
++ m_pModule.SetValue(pModule);
+ m_typeOrMethodDef = typeOrMethodDef;
+ m_token = token;
+ m_index = index;
+@@ -479,7 +479,8 @@ public:
+ {
+ LIMITED_METHOD_CONTRACT;
+ SUPPORTS_DAC;
+- return m_pModule;
++
++ return ReadPointer(this, &TypeVarTypeDesc::m_pModule);
+ }
+
+ unsigned int GetIndex()
+@@ -567,7 +568,7 @@ protected:
+ BOOL ConstrainedAsObjRefHelper();
+
+ // Module containing the generic definition, also the loader module for this type desc
+- PTR_Module m_pModule;
++ RelativePointer<PTR_Module> m_pModule;
+
+ // Declaring type or method
+ mdToken m_typeOrMethodDef;
+diff --git a/src/vm/typehash.cpp b/src/vm/typehash.cpp
+index b3de777..0d53a15 100644
+--- a/src/vm/typehash.cpp
++++ b/src/vm/typehash.cpp
+@@ -67,8 +67,8 @@ LoaderAllocator *EETypeHashTable::GetLoaderAllocator()
+ }
+ else
+ {
+- _ASSERTE(m_pModule != NULL);
+- return m_pModule->GetLoaderAllocator();
++ _ASSERTE(!m_pModule.IsNull());
++ return GetModule()->GetLoaderAllocator();
+ }
+ }
+
+@@ -417,7 +417,7 @@ EETypeHashEntry_t *EETypeHashTable::FindItem(TypeKey* pKey)
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ Module *pDefiningModule;
+- PCCOR_SIGNATURE pSig = m_pModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
++ PCCOR_SIGNATURE pSig = GetModule()->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+ if (pDefiningModule == NULL)
+ break;
+
+@@ -487,7 +487,8 @@ BOOL EETypeHashTable::CompareInstantiatedType(TypeHandle t, Module *pModule, mdT
+ if (CORCOMPILE_IS_POINTER_TAGGED(fixup))
+ {
+ Module *pDefiningModule;
+- PCCOR_SIGNATURE pSig = m_pModule->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
++
++ PCCOR_SIGNATURE pSig = GetModule()->GetEncodedSigIfLoaded(CORCOMPILE_UNTAG_TOKEN(fixup), &pDefiningModule);
+
+ // First check that the modules for the generic type defs match
+ if (dac_cast<TADDR>(pDefiningModule) !=
+@@ -536,7 +537,7 @@ BOOL EETypeHashTable::CompareInstantiatedType(TypeHandle t, Module *pModule, mdT
+ DACCOP_IGNORE(CastOfMarshalledType, "Dual mode DAC problem, but since the size is the same, the cast is safe");
+ TADDR candidateArg = ((FixupPointer<TADDR> *)candidateInst.GetRawArgs())[i].GetValue();
+
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, inst[i]))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), candidateArg, inst[i]))
+ {
+ return FALSE;
+ }
+@@ -578,7 +579,7 @@ BOOL EETypeHashTable::CompareFnPtrType(TypeHandle t, BYTE callConv, DWORD numArg
+ for (DWORD i = 0; i <= numArgs; i++)
+ {
+ TADDR candidateArg = retAndArgTypes2[i].AsTAddr();
+- if (!ZapSig::CompareTaggedPointerToTypeHandle(m_pModule, candidateArg, retAndArgTypes[i]))
++ if (!ZapSig::CompareTaggedPointerToTypeHandle(GetModule(), candidateArg, retAndArgTypes[i]))
+ {
+ return FALSE;
+ }
+@@ -647,7 +648,7 @@ VOID EETypeHashTable::InsertValue(TypeHandle data)
+ PRECONDITION(!data.IsEncodedFixup());
+ PRECONDITION(!data.IsGenericTypeDefinition()); // Generic type defs live in typedef table (availableClasses)
+ PRECONDITION(data.HasInstantiation() || data.HasTypeParam() || data.IsFnPtrType()); // It's an instantiated type or an array/ptr/byref type
+- PRECONDITION(!m_pModule || m_pModule->IsTenured()); // Destruct won't destruct m_pAvailableParamTypes for non-tenured modules - so make sure no one tries to insert one before the Module has been tenured
++ PRECONDITION(m_pModule.IsNull() || GetModule()->IsTenured()); // Destruct won't destruct m_pAvailableParamTypes for non-tenured modules - so make sure no one tries to insert one before the Module has been tenured
+ }
+ CONTRACTL_END
+
+@@ -673,7 +674,7 @@ void EETypeHashTable::Save(DataImage *image, Module *module, CorProfileData *pro
+ CONTRACTL
+ {
+ STANDARD_VM_CHECK;
+- PRECONDITION(image->GetModule() == m_pModule);
++ PRECONDITION(image->GetModule() == GetModule());
+ }
+ CONTRACTL_END;
+
+@@ -715,7 +716,7 @@ void EETypeHashTable::Save(DataImage *image, Module *module, CorProfileData *pro
+ {
+ if (flags & (1<<ReadTypeHashTable))
+ {
+- TypeHandle th = m_pModule->LoadIBCTypeHelper(pBlobSigEntry);
++ TypeHandle th = GetModule()->LoadIBCTypeHelper(pBlobSigEntry);
+ #if defined(_DEBUG) && !defined(DACCESS_COMPILE)
+ g_pConfig->DebugCheckAndForceIBCFailure(EEConfig::CallSite_8);
+ #endif
+@@ -798,14 +799,14 @@ void EETypeHashTable::FixupEntry(DataImage *pImage, EETypeHashEntry_t *pEntry, v
+ if (pType.IsTypeDesc())
+ {
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(EETypeHashEntry_t, m_data),
+- pType.AsTypeDesc(), 2);
++ pType.AsTypeDesc(), 2, IMAGE_REL_BASED_RelativePointer);
+
+ pType.AsTypeDesc()->Fixup(pImage);
+ }
+ else
+ {
+ pImage->FixupField(pFixupBase, cbFixupOffset + offsetof(EETypeHashEntry_t, m_data),
+- pType.AsMethodTable());
++ pType.AsMethodTable(), 0, IMAGE_REL_BASED_RelativePointer);
+
+ pType.AsMethodTable()->Fixup(pImage);
+ }
+@@ -838,17 +839,20 @@ TypeHandle EETypeHashEntry::GetTypeHandle()
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // Remove any hot entry indicator bit that may have been set as the result of Ngen saving.
+- return TypeHandle::FromTAddr(m_data & ~0x1);
++ TADDR data = dac_cast<TADDR>(GetData());
++ return TypeHandle::FromTAddr(data & ~0x1);
+ }
+
++#ifndef DACCESS_COMPILE
+ void EETypeHashEntry::SetTypeHandle(TypeHandle handle)
+ {
+ LIMITED_METHOD_DAC_CONTRACT;
+
+ // We plan to steal the low-order bit of the handle for ngen purposes.
+ _ASSERTE((handle.AsTAddr() & 0x1) == 0);
+- m_data = handle.AsTAddr();
++ m_data.SetValueMaybeNull(handle.AsPtr());
+ }
++#endif // !DACCESS_COMPILE
+
+ #ifdef FEATURE_PREJIT
+ bool EETypeHashEntry::IsHot()
+@@ -856,16 +860,21 @@ bool EETypeHashEntry::IsHot()
+ LIMITED_METHOD_CONTRACT;
+
+ // Low order bit of data field indicates a hot entry.
+- return (m_data & 1) != 0;
++ TADDR data = dac_cast<TADDR>(GetData());
++ return (data & 1) != 0;
+ }
+
++#ifndef DACCESS_COMPILE
+ void EETypeHashEntry::MarkAsHot()
+ {
+ LIMITED_METHOD_CONTRACT;
+
+ // Low order bit of data field indicates a hot entry.
+- m_data |= 0x1;
++ TADDR data = dac_cast<TADDR>(GetData());
++ data |= 0x1;
++ m_data.SetValueMaybeNull(dac_cast<PTR_VOID>(data));
+ }
++#endif // !DACCESS_COMPILE
+ #endif // FEATURE_PREJIT
+
+ #ifdef _MSC_VER
+diff --git a/src/vm/typehash.h b/src/vm/typehash.h
+index ce1f90b..c9b01d5 100644
+--- a/src/vm/typehash.h
++++ b/src/vm/typehash.h
+@@ -42,13 +42,27 @@ typedef struct EETypeHashEntry
+ void MarkAsHot();
+ #endif // FEATURE_PREJIT
+
++#ifndef DACCESS_COMPILE
++ EETypeHashEntry& operator=(const EETypeHashEntry& src)
++ {
++ m_data.SetValueMaybeNull(src.m_data.GetValueMaybeNull());
++
++ return *this;
++ }
++#endif // !DACCESS_COMPILE
++
++ PTR_VOID GetData()
++ {
++ return ReadPointerMaybeNull(this, &EETypeHashEntry::m_data);
++ }
++
+ private:
+ friend class EETypeHashTable;
+ #ifdef DACCESS_COMPILE
+ friend class NativeImageDumper;
+ #endif
+
+- TADDR m_data;
++ RelativePointer<PTR_VOID> m_data;
+ } EETypeHashEntry_t;
+
+
+--
+2.7.4
+
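
The conversions in this patch replace absolute pointer fields (for example, PTR_Module m_pModule and TADDR m_data) with RelativePointer<...> fields that are read through ReadPointer/ReadPointerMaybeNull and written with SetValue/SetValueMaybeNull, so the stored value is an offset from the field's own address and the NGen image needs no base relocation for it. Below is a minimal, self-contained sketch of that self-relative pointer idea; RelativePtr, Module, and main are illustrative assumptions for this sketch, not the CoreCLR RelativePointer implementation.

#include <cstdint>
#include <cstdio>

// Simplified stand-in for a relative pointer: stores the signed distance from
// the field itself to the target. An image containing such a field can be
// mapped at any base address without a pointer relocation, because the
// field-to-target distance is the same at every base.
template <typename T>
struct RelativePtr
{
    std::intptr_t m_delta = 0;   // 0 means null, mirroring the "maybe null" helpers

    void SetValueMaybeNull(T* target)
    {
        m_delta = (target == nullptr)
                      ? 0
                      : reinterpret_cast<std::intptr_t>(target) -
                        reinterpret_cast<std::intptr_t>(this);
    }

    T* GetValueMaybeNull() const
    {
        return (m_delta == 0)
                   ? nullptr
                   : reinterpret_cast<T*>(
                         reinterpret_cast<std::intptr_t>(this) + m_delta);
    }
};

struct Module { const char* name; };

int main()
{
    Module mod{"ExampleModule"};

    RelativePtr<Module> ref;
    ref.SetValueMaybeNull(&mod);                 // store an offset, not an address
    std::printf("%s\n", ref.GetValueMaybeNull()->name);
    return 0;
}

Reading back through GetValueMaybeNull recomputes the absolute address from the field's location, which is why the patch routes all accesses through accessors such as GetModule(), GetWarmBuckets(), and GetData() instead of touching the raw fields.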