author    Filip Navara <filip.navara@gmail.com>    2019-01-22 13:16:45 +0100
committer Jan Kotas <jkotas@microsoft.com>         2019-01-22 04:16:45 -0800
commit    635a609eba8db8082a4e1245ce1291f9bbe9835c (patch)
tree      bdd9ab76b9528377ee506e22e2684306f84c63d1 /src/System.Private.CoreLib/src/System
parent    61ff7397e849273bafc8b1e46ec88ddd905ce187 (diff)
Move MemoryFailPoint to shared CoreLib partition (#22104)
* Move MemoryFailPoint to shared CoreLib partition.
* Split MemoryFailPoint into Unix and Windows versions.
* Replace MemoryFailPoint.GetMemorySettings FCall with GC.GetSegmentSize to make sharing with CoreRT easier.
Diffstat (limited to 'src/System.Private.CoreLib/src/System')
-rw-r--r--  src/System.Private.CoreLib/src/System/GC.cs                          3
-rw-r--r--  src/System.Private.CoreLib/src/System/Runtime/MemoryFailPoint.cs   484
2 files changed, 3 insertions, 484 deletions
diff --git a/src/System.Private.CoreLib/src/System/GC.cs b/src/System.Private.CoreLib/src/System/GC.cs
index aac612a63a..838e8147f2 100644
--- a/src/System.Private.CoreLib/src/System/GC.cs
+++ b/src/System.Private.CoreLib/src/System/GC.cs
@@ -104,6 +104,9 @@ namespace System
[MethodImplAttribute(MethodImplOptions.InternalCall)]
internal static extern bool IsServerGC();
+ [MethodImplAttribute(MethodImplOptions.InternalCall)]
+ internal static extern ulong GetSegmentSize();
+
[DllImport(JitHelpers.QCall, CharSet = CharSet.Unicode)]
private static extern void _AddMemoryPressure(ulong bytesAllocated);
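
The commit message's last bullet shows up in the hunk above: GC gains a GetSegmentSize InternalCall, while the file deleted below loses its GetMemorySettings FCall. The new shared-partition MemoryFailPoint sources are outside this diffstat, but the static initializer presumably changes roughly as follows. This is a sketch only, assuming the field names from the deleted file are kept; GetTopOfMemory is a hypothetical per-OS helper, not something this diff shows.

// Sketch of the shared CoreLib partition (not part of this diff).
static MemoryFailPoint()
{
    // Was: GetMemorySettings(out s_GCSegmentSize, out s_topOfMemory);
    // The GC segment size now comes from the new GC.GetSegmentSize InternalCall,
    // which CoreRT can also provide, so only the address-space limit query
    // remains OS-specific (Windows vs. Unix files).
    s_GCSegmentSize = GC.GetSegmentSize();
    s_topOfMemory = GetTopOfMemory(); // hypothetical helper, implemented per OS
}
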
diff --git a/src/System.Private.CoreLib/src/System/Runtime/MemoryFailPoint.cs b/src/System.Private.CoreLib/src/System/Runtime/MemoryFailPoint.cs
deleted file mode 100644
index a6d8ab4284..0000000000
--- a/src/System.Private.CoreLib/src/System/Runtime/MemoryFailPoint.cs
+++ /dev/null
@@ -1,484 +0,0 @@
-// Licensed to the .NET Foundation under one or more agreements.
-// The .NET Foundation licenses this file to you under the MIT license.
-// See the LICENSE file in the project root for more information.
-
-/*============================================================
-**
-**
-**
-** Provides a way for an app to not start an operation unless
-** there's a reasonable chance there's enough memory
-** available for the operation to succeed.
-**
-**
-===========================================================*/
-
-using System;
-using System.IO;
-using Microsoft.Win32;
-using System.Runtime.InteropServices;
-using System.Threading;
-using System.Runtime.CompilerServices;
-using System.Runtime.ConstrainedExecution;
-using System.Runtime.Versioning;
-using System.Diagnostics;
-
-/*
- This class allows an application to fail before starting certain
- activities. The idea is to fail early instead of failing in the middle
- of some long-running operation to increase the survivability of the
- application and ensure you don't have to write tricky code to handle an
- OOM anywhere in your app's code (which implies state corruption, meaning you
- should unload the appdomain, if you have a transacted environment to ensure
- rollback of individual transactions). This is an incomplete tool to attempt
- hoisting all your OOM failures from anywhere in your worker methods to one
- particular point where it is easier to handle an OOM failure, and you can
- optionally choose to not start a workitem if it will likely fail. This does
- not help the performance of your code directly (other than helping to avoid
- AD unloads). The point is to avoid starting work if it is likely to fail.
- The Enterprise Services team has used these memory gates effectively in the
- unmanaged world for a decade.
-
- In Whidbey, we will simply check to see if there is enough memory available
- in the OS's page file & attempt to ensure there might be enough space free
- within the process's address space (checking for address space fragmentation
- as well). We will not commit or reserve any memory. To avoid race conditions with
- other threads using MemoryFailPoints, we'll also keep track of a
- process-wide amount of memory "reserved" via all currently-active
- MemoryFailPoints. This has two problems:
- 1) This can account for memory twice. If a thread creates a
- MemoryFailPoint for 100 MB then allocates 99 MB, we'll see 99 MB
- less free memory and 100 MB less reserved memory. Yet, subtracting
- off the 100 MB is necessary because the thread may not have started
- allocating memory yet. Disposing of this class immediately after
- front-loaded allocations have completed is a great idea.
- 2) This is still vulnerable to race conditions with other threads that don't use
- MemoryFailPoints.
- So this class is far from perfect. But it may be good enough to
- meaningfully reduce the frequency of OutOfMemoryExceptions in managed apps.
-
- In Orcas or later, we might allocate some memory from the OS and add it
- to an allocation context for this thread. Obviously, at that point we need
- some way of conveying when we release this block of memory. So, we
- implemented IDisposable on this type in Whidbey and expect all users to call
- this from within a using block to provide lexical scope for their memory
- usage. The call to Dispose (implicit with the using block) will give us an
- opportunity to release this memory, perhaps. We anticipate this will give
- us the possibility of a more effective design in a future version.
-
- In Orcas, we may also need to differentiate between allocations that would
- go into the normal managed heap vs. the large object heap, or we should
- consider checking for enough free space in both locations (with any
- appropriate adjustments to ensure the memory is contiguous).
-*/
-
-namespace System.Runtime
-{
- public sealed class MemoryFailPoint : CriticalFinalizerObject, IDisposable
- {
- // Find the top section of user mode memory. Avoid the last 64K.
- // Windows reserves that block for the kernel, apparently, and doesn't
- // let us ask about that memory. But since we ask for memory in 1 MB
- // chunks, we don't have to special case this. Also, we need to
- // deal with 32 bit machines in 3 GB mode.
- // Using Win32's GetSystemInfo should handle all this for us.
- private static readonly ulong s_topOfMemory;
-
- // Walking the address space is somewhat expensive, taking around half
- // a millisecond. Doing that per transaction limits us to a max of
- // ~2000 transactions/second. Instead, let's do this address space
- // walk once every 10 seconds, or when we will likely fail. This
- // amortization scheme can reduce the cost of a memory gate by about
- // a factor of 100.
- private static long s_hiddenLastKnownFreeAddressSpace = 0;
- private static long s_hiddenLastTimeCheckingAddressSpace = 0;
- private const int CheckThreshold = 10 * 1000; // 10 seconds
-
- private static long LastKnownFreeAddressSpace
- {
- get { return Volatile.Read(ref s_hiddenLastKnownFreeAddressSpace); }
- set { Volatile.Write(ref s_hiddenLastKnownFreeAddressSpace, value); }
- }
-
- private static long AddToLastKnownFreeAddressSpace(long addend)
- {
- return Interlocked.Add(ref s_hiddenLastKnownFreeAddressSpace, addend);
- }
-
- private static long LastTimeCheckingAddressSpace
- {
- get { return Volatile.Read(ref s_hiddenLastTimeCheckingAddressSpace); }
- set { Volatile.Write(ref s_hiddenLastTimeCheckingAddressSpace, value); }
- }
-
- // When allocating memory segment by segment, we've hit some cases
- // where there are only 22 MB of memory available on the machine,
- // we need 1 16 MB segment, and the OS does not succeed in giving us
- // that memory. Reasons for this could include:
- // 1) The GC does allocate memory when doing a collection.
- // 2) Another process on the machine could grab that memory.
- // 3) Some other part of the runtime might grab this memory.
- // If we build in a little padding, we can help protect
- // ourselves against some of these cases, and we want to err on the
- // conservative side with this class.
- private const int LowMemoryFudgeFactor = 16 << 20;
-
- // Round requested size to a 16MB multiple to have a better granularity
- // when checking for available memory.
- private const int MemoryCheckGranularity = 16;
-
- // Note: This may become dynamically tunable in the future.
- // Also note that we can have different segment sizes for the normal vs.
- // large object heap. We currently use the max of the two.
- private static readonly ulong s_GCSegmentSize;
-
- // For multi-threaded workers, we want to ensure that if two workers
- // use a MemoryFailPoint at the same time, and they both succeed, that
- // they don't trample over each other's memory. Keep a process-wide
- // count of "reserved" memory, and decrement this in Dispose and
- // in the critical finalizer.
- private static long s_failPointReservedMemory;
-
- private ulong _reservedMemory; // The size of this request (from user)
- private bool _mustSubtractReservation; // Did we add data to SharedStatics?
-
- static MemoryFailPoint()
- {
- GetMemorySettings(out s_GCSegmentSize, out s_topOfMemory);
- }
-
- // We can remove this link demand in a future version - we will
- // have scenarios for this in partial trust in the future, but
- // we're doing this just to restrict this in case the code below
- // is somehow incorrect.
- public MemoryFailPoint(int sizeInMegabytes)
- {
- if (sizeInMegabytes <= 0)
- throw new ArgumentOutOfRangeException(nameof(sizeInMegabytes), SR.ArgumentOutOfRange_NeedNonNegNum);
-
-#if !FEATURE_PAL // Remove this when CheckForAvailableMemory is able to provide legitimate estimates
- ulong size = ((ulong)sizeInMegabytes) << 20;
- _reservedMemory = size;
-
- // Check to see that we both have enough memory on the system
- // and that we have enough room within the user section of the
- // process's address space. Also, we need to use the GC segment
- // size, not the amount of memory the user wants to allocate.
- // Consider correcting this to reflect free memory within the GC
- // heap, and to check both the normal & large object heaps.
- ulong segmentSize = (ulong)(Math.Ceiling((double)size / s_GCSegmentSize) * s_GCSegmentSize);
- if (segmentSize >= s_topOfMemory)
- throw new InsufficientMemoryException(SR.InsufficientMemory_MemFailPoint_TooBig);
-
- ulong requestedSizeRounded = (ulong)(Math.Ceiling((double)sizeInMegabytes / MemoryCheckGranularity) * MemoryCheckGranularity);
- //re-convert into bytes
- requestedSizeRounded <<= 20;
-
- ulong availPageFile = 0; // available VM (physical + page file)
- ulong totalAddressSpaceFree = 0; // non-contiguous free address space
-
- // Check for available memory, with 2 attempts at getting more
- // memory.
- // Stage 0: If we don't have enough, trigger a GC.
- // Stage 1: If we don't have enough, try growing the swap file.
- // Stage 2: Update memory state, then fail or leave loop.
- //
- // (In the future, we could consider adding another stage after
- // Stage 0 to run finalizers. However, before doing that make sure
- // that we could abort this constructor when we call
- // GC.WaitForPendingFinalizers, noting that this method uses a CER
- // so it can't be aborted, and we have a critical finalizer. It
- // would probably work, but do some thinking first.)
- for (int stage = 0; stage < 3; stage++)
- {
- CheckForAvailableMemory(out availPageFile, out totalAddressSpaceFree);
-
- // If we have enough room, then skip some stages.
- // Note that multiple threads can still lead to a race condition for our free chunk
- // of address space, which can't be easily solved.
- ulong reserved = (ulong)Volatile.Read(ref s_failPointReservedMemory);
- ulong segPlusReserved = segmentSize + reserved;
- bool overflow = segPlusReserved < segmentSize || segPlusReserved < reserved;
- bool needPageFile = availPageFile < (requestedSizeRounded + reserved + LowMemoryFudgeFactor) || overflow;
- bool needAddressSpace = totalAddressSpaceFree < segPlusReserved || overflow;
-
- // Ensure our cached amount of free address space is not stale.
- long now = Environment.TickCount; // Handle wraparound.
- if ((now > LastTimeCheckingAddressSpace + CheckThreshold || now < LastTimeCheckingAddressSpace) ||
- LastKnownFreeAddressSpace < (long)segmentSize)
- {
- CheckForFreeAddressSpace(segmentSize, false);
- }
- bool needContiguousVASpace = (ulong)LastKnownFreeAddressSpace < segmentSize;
-
-#if false
- Console.WriteLine($"MemoryFailPoint:" +
- $"Checking for {(segmentSize >> 20)} MB, " +
- $"for allocation size of {sizeInMegabytes} MB, " +
- $"stage {stage}. " +
- $"Need page file? {needPageFile} " +
- $"Need Address Space? {needAddressSpace} " +
- $"Need Contiguous address space? {needContiguousVASpace} " +
- $"Avail page file: {(availPageFile >> 20)} MB " +
- $"Total free VA space: {totalAddressSpaceFree >> 20} MB " +
- $"Contiguous free address space (found): {LastKnownFreeAddressSpace >> 20} MB " +
- $"Space reserved via process's MemoryFailPoints: {reserved} MB");
-#endif
-
- if (!needPageFile && !needAddressSpace && !needContiguousVASpace)
- break;
-
- switch (stage)
- {
- case 0:
- // The GC will release empty segments to the OS. This will
- // relieve us from having to guess whether there's
- // enough memory in either GC heap, and whether
- // internal fragmentation will prevent those
- // allocations from succeeding.
- GC.Collect();
- continue;
-
- case 1:
- // Do this step if and only if the page file is too small.
- if (!needPageFile)
- continue;
-
- // Attempt to grow the OS's page file. Note that we ignore
- // any allocation routines from the host intentionally.
- RuntimeHelpers.PrepareConstrainedRegions();
-
- // This shouldn't overflow due to the if clauses above.
- UIntPtr numBytes = new UIntPtr(segmentSize);
- unsafe
- {
- void* pMemory = Win32Native.VirtualAlloc(null, numBytes, Win32Native.MEM_COMMIT, Win32Native.PAGE_READWRITE);
- if (pMemory != null)
- {
- bool r = Win32Native.VirtualFree(pMemory, UIntPtr.Zero, Win32Native.MEM_RELEASE);
- if (!r)
- throw Win32Marshal.GetExceptionForLastWin32Error();
- }
- }
-
- continue;
-
- case 2:
- // The call to CheckForAvailableMemory above updated our
- // state.
- if (needPageFile || needAddressSpace)
- {
- InsufficientMemoryException e = new InsufficientMemoryException(SR.InsufficientMemory_MemFailPoint);
-#if DEBUG
- e.Data["MemFailPointState"] = new MemoryFailPointState(sizeInMegabytes, segmentSize,
- needPageFile, needAddressSpace, needContiguousVASpace,
- availPageFile >> 20, totalAddressSpaceFree >> 20,
- LastKnownFreeAddressSpace >> 20, reserved);
-#endif
- throw e;
- }
-
- if (needContiguousVASpace)
- {
- InsufficientMemoryException e = new InsufficientMemoryException(SR.InsufficientMemory_MemFailPoint_VAFrag);
-#if DEBUG
- e.Data["MemFailPointState"] = new MemoryFailPointState(sizeInMegabytes, segmentSize,
- needPageFile, needAddressSpace, needContiguousVASpace,
- availPageFile >> 20, totalAddressSpaceFree >> 20,
- LastKnownFreeAddressSpace >> 20, reserved);
-#endif
- throw e;
- }
-
- break;
-
- default:
- Debug.Fail("Fell through switch statement!");
- break;
- }
- }
-
- // Success - we have enough room the last time we checked.
- // Now update our shared state in a somewhat atomic fashion
- // and handle a simple race condition with other MemoryFailPoint instances.
- AddToLastKnownFreeAddressSpace(-((long)size));
- if (LastKnownFreeAddressSpace < 0)
- CheckForFreeAddressSpace(segmentSize, true);
-
- RuntimeHelpers.PrepareConstrainedRegions();
-
- Interlocked.Add(ref s_failPointReservedMemory, (long)size);
- _mustSubtractReservation = true;
-#endif
- }
-
- private static void CheckForAvailableMemory(out ulong availPageFile, out ulong totalAddressSpaceFree)
- {
- bool r;
- Win32Native.MEMORYSTATUSEX memory = new Win32Native.MEMORYSTATUSEX();
- r = Win32Native.GlobalMemoryStatusEx(ref memory);
- if (!r)
- throw Win32Marshal.GetExceptionForLastWin32Error();
- availPageFile = memory.availPageFile;
- totalAddressSpaceFree = memory.availVirtual;
- // Console.WriteLine($"Memory gate: Mem load: {memory.memoryLoad}% Available memory (physical + page file): {(memory.availPageFile >> 20)} MB Total free address space: {memory.availVirtual >> 20} MB GC Heap: {(GC.GetTotalMemory(true) >> 20)} MB");
- }
-
- // Based on the shouldThrow parameter, this will throw an exception, or
- // returns whether there is enough space. In all cases, we update
- // our last known free address space, hopefully avoiding needing to
- // probe again.
- private static unsafe bool CheckForFreeAddressSpace(ulong size, bool shouldThrow)
- {
- // Start walking the address space at 0. VirtualAlloc may wrap
- // around the address space. We don't need to find the exact
- // pages that VirtualAlloc would return - we just need to
- // know whether VirtualAlloc could succeed.
- ulong freeSpaceAfterGCHeap = MemFreeAfterAddress(null, size);
-
- // Console.WriteLine($"MemoryFailPoint: Checked for free VA space. Found enough? {(freeSpaceAfterGCHeap >= size)} Asked for: {size} Found: {freeSpaceAfterGCHeap}");
-
- // We may set these without taking a lock - I don't believe
- // this will hurt, as long as we never increment this number in
- // the Dispose method. If we do an extra bit of checking every
- // once in a while, but we avoid taking a lock, we may win.
- LastKnownFreeAddressSpace = (long)freeSpaceAfterGCHeap;
- LastTimeCheckingAddressSpace = Environment.TickCount;
-
- if (freeSpaceAfterGCHeap < size && shouldThrow)
- throw new InsufficientMemoryException(SR.InsufficientMemory_MemFailPoint_VAFrag);
- return freeSpaceAfterGCHeap >= size;
- }
-
- // Returns the amount of consecutive free memory available in a block
- // of pages. If we didn't have enough address space, we still return
- // a positive value < size, to help potentially avoid the overhead of
- // this check if we use a MemoryFailPoint with a smaller size next.
- private static unsafe ulong MemFreeAfterAddress(void* address, ulong size)
- {
- if (size >= s_topOfMemory)
- return 0;
-
- ulong largestFreeRegion = 0;
- Win32Native.MEMORY_BASIC_INFORMATION memInfo = new Win32Native.MEMORY_BASIC_INFORMATION();
- UIntPtr sizeOfMemInfo = (UIntPtr)Marshal.SizeOf(memInfo);
-
- while (((ulong)address) + size < s_topOfMemory)
- {
- UIntPtr r = Win32Native.VirtualQuery(address, ref memInfo, sizeOfMemInfo);
- if (r == UIntPtr.Zero)
- throw Win32Marshal.GetExceptionForLastWin32Error();
-
- ulong regionSize = memInfo.RegionSize.ToUInt64();
- if (memInfo.State == Win32Native.MEM_FREE)
- {
- if (regionSize >= size)
- return regionSize;
- else
- largestFreeRegion = Math.Max(largestFreeRegion, regionSize);
- }
- address = (void*)((ulong)address + regionSize);
- }
- return largestFreeRegion;
- }
-
- [MethodImpl(MethodImplOptions.InternalCall)]
- private static extern void GetMemorySettings(out ulong maxGCSegmentSize, out ulong topOfMemory);
-
- ~MemoryFailPoint()
- {
- Dispose(false);
- }
-
- // Applications must call Dispose, which conceptually "releases" the
- // memory that was "reserved" by the MemoryFailPoint. This affects a
- // global count of reserved memory in this version (helping to throttle
- // future MemoryFailPoints) in this version. We may in the
- // future create an allocation context and release it in the Dispose
- // method. While the finalizer will eventually free this block of
- // memory, apps will help their performance greatly by calling Dispose.
- public void Dispose()
- {
- Dispose(true);
- GC.SuppressFinalize(this);
- }
-
- private void Dispose(bool disposing)
- {
- // This is just bookkeeping to ensure multiple threads can really
- // get enough memory, and this does not actually reserve memory
- // within the GC heap.
- if (_mustSubtractReservation)
- {
- RuntimeHelpers.PrepareConstrainedRegions();
-
- Interlocked.Add(ref s_failPointReservedMemory, -(long)_reservedMemory);
- _mustSubtractReservation = false;
- }
-
- /*
- // Prototype performance
- // Let's pretend that we returned at least some free memory to
- // the GC heap. We don't know this is true - the objects could
- // have a longer lifetime, and the memory could be elsewhere in the
- // GC heap. Additionally, we subtracted off the segment size, not
- // this size. That's ok - we don't mind if this slowly degrades
- // and requires us to refresh the value a little bit sooner.
- // But releasing the memory here should help us avoid probing for
- // free address space excessively with large workItem sizes.
- Interlocked.Add(ref LastKnownFreeAddressSpace, _reservedMemory);
- */
- }
-
-#if DEBUG
- [Serializable]
- internal sealed class MemoryFailPointState
- {
- private ulong _segmentSize;
- private int _allocationSizeInMB;
- private bool _needPageFile;
- private bool _needAddressSpace;
- private bool _needContiguousVASpace;
- private ulong _availPageFile;
- private ulong _totalFreeAddressSpace;
- private long _lastKnownFreeAddressSpace;
- private ulong _reservedMem;
- private string _stackTrace; // Where did we fail, for additional debugging.
-
- internal MemoryFailPointState(int allocationSizeInMB, ulong segmentSize, bool needPageFile, bool needAddressSpace, bool needContiguousVASpace, ulong availPageFile, ulong totalFreeAddressSpace, long lastKnownFreeAddressSpace, ulong reservedMem)
- {
- _allocationSizeInMB = allocationSizeInMB;
- _segmentSize = segmentSize;
- _needPageFile = needPageFile;
- _needAddressSpace = needAddressSpace;
- _needContiguousVASpace = needContiguousVASpace;
- _availPageFile = availPageFile;
- _totalFreeAddressSpace = totalFreeAddressSpace;
- _lastKnownFreeAddressSpace = lastKnownFreeAddressSpace;
- _reservedMem = reservedMem;
- try
- {
- _stackTrace = Environment.StackTrace;
- }
- catch (System.Security.SecurityException)
- {
- _stackTrace = "no permission";
- }
- catch (OutOfMemoryException)
- {
- _stackTrace = "out of memory";
- }
- }
-
- public override string ToString()
- {
- return string.Format(System.Globalization.CultureInfo.InvariantCulture, "MemoryFailPoint detected insufficient memory to guarantee an operation could complete. Checked for {0} MB, for allocation size of {1} MB. Need page file? {2} Need Address Space? {3} Need Contiguous address space? {4} Avail page file: {5} MB Total free VA space: {6} MB Contiguous free address space (found): {7} MB Space reserved by process's MemoryFailPoints: {8} MB",
- _segmentSize >> 20, _allocationSizeInMB, _needPageFile,
- _needAddressSpace, _needContiguousVASpace,
- _availPageFile >> 20, _totalFreeAddressSpace >> 20,
- _lastKnownFreeAddressSpace >> 20, _reservedMem);
- }
- }
-#endif
- }
-}
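
For context, the usage pattern the deleted header comment argues for (gate a work item up front, release the process-wide reservation via Dispose/using) looks like this from application code. This is an illustrative example only, not part of the commit; the 128 MB figure and the ProcessBatch method are made up.

using System;
using System.Runtime;

class Worker
{
    public void TryProcess(byte[][] batch)
    {
        try
        {
            // Ask up front whether ~128 MB is likely to be available before
            // starting the work item; the gate only does bookkeeping and does
            // not commit or reserve any memory itself.
            using (new MemoryFailPoint(sizeInMegabytes: 128))
            {
                ProcessBatch(batch); // hypothetical front-loaded allocation work
            }
            // Dispose (implicit via 'using') releases the reservation.
        }
        catch (InsufficientMemoryException)
        {
            // Fail early: skip or requeue the work item instead of risking an
            // OutOfMemoryException partway through it.
        }
    }

    private void ProcessBatch(byte[][] batch) { /* ... */ }
}
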