Diffstat (limited to 'src/vm/arm64/gmscpu.h')
-rw-r--r--  src/vm/arm64/gmscpu.h  33
1 file changed, 21 insertions(+), 12 deletions(-)
diff --git a/src/vm/arm64/gmscpu.h b/src/vm/arm64/gmscpu.h
index e95ef63d7d..7785daf219 100644
--- a/src/vm/arm64/gmscpu.h
+++ b/src/vm/arm64/gmscpu.h
@@ -56,25 +56,34 @@ inline void LazyMachState::setLazyStateFromUnwind(MachState* copy)
     _sp = copy->_sp;
     _pc = copy->_pc;
-    // Now copy the preserved register pointers. Note that some of the pointers could be
-    // pointing to copy->captureX19_X29[]. If that is case then while copying to destination
-    // ensure that they point to corresponding element in captureX19_X29[] of destination.
-    ULONG64* srcLowerBound = &copy->captureX19_X29[0];
-    ULONG64* srcUpperBound = (ULONG64*)((BYTE*)copy + offsetof(MachState, ptrX19_X29));
+    // Capture* has already been set, so there is no need to touch it
+    // loop over the nonvolatile context pointers and make
+    // sure to properly copy interior pointers into the
+    // new struct
-    for (int i = 0; i<NUM_NONVOLATILE_CONTEXT_POINTERS; i++)
+    PULONG64* pSrc = (PULONG64 *)&copy->ptrX19_X29;
+    PULONG64* pDst = (PULONG64 *)&this->ptrX19_X29;
+
+    const PULONG64 LowerBoundDst = (PULONG64) this;
+    const PULONG64 LowerBoundSrc = (PULONG64) copy;
+
+    const PULONG64 UpperBoundSrc = (PULONG64) ((BYTE*)LowerBoundSrc + sizeof(*copy));
+
+    for (int i = 0; i < NUM_NONVOLATILE_CONTEXT_POINTERS; i++)
     {
-        if (copy->ptrX19_X29[i] >= srcLowerBound && copy->ptrX19_X29[i] < srcUpperBound)
-        {
-            ptrX19_X29[i] = (PTR_ULONG64)((BYTE*)copy->ptrX19_X29[i] - (BYTE*)srcLowerBound + (BYTE*)captureX19_X29);
-        }
-        else
+        PULONG64 valueSrc = *pSrc++;
+
+        if ((LowerBoundSrc <= valueSrc) && (valueSrc < UpperBoundSrc))
         {
-            ptrX19_X29[i] = copy->ptrX19_X29[i];
+            // make any pointer interior to 'src' interior to 'dst'
+            valueSrc = (PULONG64)((BYTE*)valueSrc - (BYTE*)LowerBoundSrc + (BYTE*)LowerBoundDst);
         }
+
+        *pDst++ = valueSrc;
     }
+
     // this has to be last because we depend on write ordering to
     // synchronize the race implicit in updating this struct
     VolatileStore(&_isValid, TRUE);
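
For illustration only (not part of the commit): a minimal, self-contained C++ sketch of the interior-pointer rebasing pattern the new loop uses. Any pointer that lands inside the source struct is translated so it points at the same offset inside the destination struct, while pointers outside the struct are copied unchanged. The names State, CopyWithRebase, capture, and ptr below are hypothetical stand-ins for MachState, setLazyStateFromUnwind, captureX19_X29, and ptrX19_X29.

// Hypothetical stand-in types: 'State' plays the role of MachState, with
// captured register values and pointers that may refer back into capture[].
#include <cassert>
#include <cstdint>
#include <cstring>

struct State
{
    uint64_t  capture[4];   // values captured in place (like captureX19_X29)
    uint64_t* ptr[4];       // may point into this struct's capture[], or elsewhere (like ptrX19_X29)
};

// Copy 'src' into 'dst', rebasing any pointer that is interior to 'src'
// so that it becomes interior to 'dst' at the same offset.
void CopyWithRebase(State* dst, const State* src)
{
    std::memcpy(dst->capture, src->capture, sizeof(dst->capture));

    const uint8_t* srcLo = (const uint8_t*)src;
    const uint8_t* srcHi = srcLo + sizeof(*src);

    for (int i = 0; i < 4; i++)
    {
        uint64_t* value = src->ptr[i];

        if ((const uint8_t*)value >= srcLo && (const uint8_t*)value < srcHi)
        {
            // interior to 'src': shift by the distance between the two structs
            value = (uint64_t*)((uint8_t*)dst + ((const uint8_t*)value - srcLo));
        }

        dst->ptr[i] = value;
    }
}

int main()
{
    static uint64_t global = 7;

    State src = {};
    src.capture[2] = 42;
    src.ptr[2] = &src.capture[2];   // interior pointer
    src.ptr[0] = &global;           // exterior pointer

    State dst = {};
    CopyWithRebase(&dst, &src);

    assert(dst.ptr[2] == &dst.capture[2]);   // rebased into the destination
    assert(*dst.ptr[2] == 42);
    assert(dst.ptr[0] == &global);           // left unchanged
    return 0;
}

The sketch omits the synchronization concern: in the real code the VolatileStore of _isValid must come last, since readers racing with this update rely on that write ordering, as the trailing comment in the diff notes.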