// Licensed to the .NET Foundation under one or more agreements.
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.

using System.Diagnostics;
using System.Runtime;
using System.Runtime.CompilerServices;

#if BIT64
using nuint = System.UInt64;
#else
using nuint = System.UInt32;
#endif

namespace System
{
    /// <summary>
    /// Extension methods and non-generic helpers for <see cref="Span{T}"/> and <see cref="ReadOnlySpan{T}"/>.
    /// </summary>
    public static class Span
    {
        /// <summary>
        /// Casts a Span of one primitive type <typeparamref name="T"/> to a Span of bytes.
        /// That type may not contain pointers or references. This is checked at runtime in order to preserve type safety.
        /// </summary>
        /// <param name="source">The source slice, of type <typeparamref name="T"/>.</param>
        /// <exception cref="System.ArgumentException">
        /// Thrown when <typeparamref name="T"/> contains pointers.
        /// </exception>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static Span<byte> AsBytes<T>(this Span<T> source)
            where T : struct
        {
            if (RuntimeHelpers.IsReferenceOrContainsReferences<T>())
                ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(T));

            return new Span<byte>(
                ref Unsafe.As<T, byte>(ref source.DangerousGetPinnableReference()),
                checked(source.Length * Unsafe.SizeOf<T>()));
        }

        /// <summary>
        /// Casts a ReadOnlySpan of one primitive type <typeparamref name="T"/> to a ReadOnlySpan of bytes.
        /// That type may not contain pointers or references. This is checked at runtime in order to preserve type safety.
        /// </summary>
        /// <param name="source">The source slice, of type <typeparamref name="T"/>.</param>
        /// <exception cref="System.ArgumentException">
        /// Thrown when <typeparamref name="T"/> contains pointers.
        /// </exception>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static ReadOnlySpan<byte> AsBytes<T>(this ReadOnlySpan<T> source)
            where T : struct
        {
            if (RuntimeHelpers.IsReferenceOrContainsReferences<T>())
                ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(T));

            return new ReadOnlySpan<byte>(
                ref Unsafe.As<T, byte>(ref source.DangerousGetPinnableReference()),
                checked(source.Length * Unsafe.SizeOf<T>()));
        }

        /// <summary>
        /// Casts a Span of one primitive type <typeparamref name="TFrom"/> to another primitive type <typeparamref name="TTo"/>.
        /// These types may not contain pointers or references. This is checked at runtime in order to preserve type safety.
        /// </summary>
        /// <remarks>
        /// Supported only for platforms that support misaligned memory access.
        /// </remarks>
        /// <param name="source">The source slice, of type <typeparamref name="TFrom"/>.</param>
        /// <exception cref="System.ArgumentException">
        /// Thrown when <typeparamref name="TFrom"/> or <typeparamref name="TTo"/> contains pointers.
        /// </exception>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static Span<TTo> NonPortableCast<TFrom, TTo>(this Span<TFrom> source)
            where TFrom : struct
            where TTo : struct
        {
            if (RuntimeHelpers.IsReferenceOrContainsReferences<TFrom>())
                ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(TFrom));
            if (RuntimeHelpers.IsReferenceOrContainsReferences<TTo>())
                ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(TTo));

            return new Span<TTo>(
                ref Unsafe.As<TFrom, TTo>(ref source.DangerousGetPinnableReference()),
                checked((int)((long)source.Length * Unsafe.SizeOf<TFrom>() / Unsafe.SizeOf<TTo>())));
        }
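
        // Illustrative usage sketch (an editorial addition, not part of the original file; the method
        // name and values below are hypothetical). It shows how AsBytes and NonPortableCast reinterpret
        // the same memory without copying: only the length is rescaled by the element-size ratio.
        private static void AsBytesAndNonPortableCastSketch()
        {
            Span<int> ints = new Span<int>(new int[] { 1, 2, 3, 4 });

            // Four 4-byte ints viewed as sixteen bytes.
            Span<byte> bytes = ints.AsBytes();
            Debug.Assert(bytes.Length == ints.Length * Unsafe.SizeOf<int>());

            // The same memory viewed as eight 2-byte shorts.
            Span<short> shorts = ints.NonPortableCast<int, short>();
            Debug.Assert(shorts.Length == 8);
        }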

        /// <summary>
        /// Casts a ReadOnlySpan of one primitive type <typeparamref name="TFrom"/> to another primitive type <typeparamref name="TTo"/>.
        /// These types may not contain pointers or references. This is checked at runtime in order to preserve type safety.
        /// </summary>
        /// <remarks>
        /// Supported only for platforms that support misaligned memory access.
        /// </remarks>
        /// <param name="source">The source slice, of type <typeparamref name="TFrom"/>.</param>
        /// <exception cref="System.ArgumentException">
        /// Thrown when <typeparamref name="TFrom"/> or <typeparamref name="TTo"/> contains pointers.
        /// </exception>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static ReadOnlySpan<TTo> NonPortableCast<TFrom, TTo>(this ReadOnlySpan<TFrom> source)
            where TFrom : struct
            where TTo : struct
        {
            if (RuntimeHelpers.IsReferenceOrContainsReferences<TFrom>())
                ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(TFrom));
            if (RuntimeHelpers.IsReferenceOrContainsReferences<TTo>())
                ThrowHelper.ThrowInvalidTypeWithPointersNotSupported(typeof(TTo));

            return new ReadOnlySpan<TTo>(
                ref Unsafe.As<TFrom, TTo>(ref source.DangerousGetPinnableReference()),
                checked((int)((long)source.Length * Unsafe.SizeOf<TFrom>() / Unsafe.SizeOf<TTo>())));
        }

        /// <summary>
        /// Creates a new readonly span over the entirety of the target string.
        /// </summary>
        /// <param name="text">The target string.</param>
        /// <exception cref="System.ArgumentNullException">Thrown when <paramref name="text"/> is a null
        /// reference (Nothing in Visual Basic).</exception>
        [MethodImpl(MethodImplOptions.AggressiveInlining)]
        public static ReadOnlySpan<char> AsSpan(this string text)
        {
            if (text == null)
                ThrowHelper.ThrowArgumentNullException(ExceptionArgument.text);

            return new ReadOnlySpan<char>(ref text.GetRawStringData(), text.Length);
        }

        internal static unsafe void CopyTo<T>(ref T destination, ref T source, int elementsCount)
        {
            if (Unsafe.AreSame(ref destination, ref source))
                return;

            if (elementsCount <= 1)
            {
                if (elementsCount == 1)
                {
                    destination = source;
                }
                return;
            }

            nuint byteCount = (nuint)elementsCount * (nuint)Unsafe.SizeOf<T>();
            if (!RuntimeHelpers.IsReferenceOrContainsReferences<T>())
            {
                fixed (byte* pDestination = &Unsafe.As<T, byte>(ref destination))
                {
                    fixed (byte* pSource = &Unsafe.As<T, byte>(ref source))
                    {
                        Buffer.Memmove(pDestination, pSource, byteCount);
                    }
                }
            }
            else
            {
                RuntimeImports.RhBulkMoveWithWriteBarrier(
                    ref Unsafe.As<T, byte>(ref destination),
                    ref Unsafe.As<T, byte>(ref source),
                    byteCount);
            }
        }
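
        // Illustrative usage sketch (an editorial addition, not part of the original file; the method
        // name and values below are hypothetical). AsSpan exposes a string's characters without copying,
        // and the internal CopyTo worker above is the memmove-style helper for element copies: blittable
        // types go through Buffer.Memmove, reference-containing types through the write-barrier-aware
        // bulk move.
        private static void AsSpanAndCopyToSketch()
        {
            ReadOnlySpan<char> chars = "hello".AsSpan();
            Debug.Assert(chars.Length == 5 && chars[0] == 'h');

            int[] source = { 1, 2, 3 };
            int[] destination = new int[3];

            // int contains no references, so this takes the Buffer.Memmove path.
            CopyTo<int>(ref destination[0], ref source[0], source.Length);
            Debug.Assert(destination[2] == 3);
        }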

        internal static unsafe void ClearWithoutReferences(ref byte b, nuint byteLength)
        {
            if (byteLength == 0)
                return;

#if AMD64 && CORECLR
            if (byteLength > 4096)
                goto PInvoke;
            Unsafe.InitBlockUnaligned(ref b, 0, (uint)byteLength);
            return;
#else
            // TODO: Optimize other platforms to be on par with AMD64 CoreCLR
            // Note: It's important that this switch handles lengths at least up to 22.
            // See notes below near the main loop for why.

            // The switch will be very fast since it can be implemented using a jump
            // table in assembly. See http://stackoverflow.com/a/449297/4077294 for more info.

            switch (byteLength)
            {
                case 1:
                    b = 0;
                    return;
                case 2:
                    Unsafe.As<byte, short>(ref b) = 0;
                    return;
                case 3:
                    Unsafe.As<byte, short>(ref b) = 0;
                    Unsafe.Add(ref b, 2) = 0;
                    return;
                case 4:
                    Unsafe.As<byte, int>(ref b) = 0;
                    return;
                case 5:
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.Add(ref b, 4) = 0;
                    return;
                case 6:
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 4)) = 0;
                    return;
                case 7:
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.Add(ref b, 6) = 0;
                    return;
                case 8:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    return;
                case 9:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    Unsafe.Add(ref b, 8) = 0;
                    return;
                case 10:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 8)) = 0;
                    return;
                case 11:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.Add(ref b, 10) = 0;
                    return;
                case 12:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    return;
                case 13:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.Add(ref b, 12) = 0;
                    return;
                case 14:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 12)) = 0;
                    return;
                case 15:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
#endif
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 12)) = 0;
                    Unsafe.Add(ref b, 14) = 0;
                    return;
                case 16:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
                    Unsafe.As<byte, long>(ref Unsafe.Add(ref b, 8)) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
#endif
                    return;
                case 17:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
                    Unsafe.As<byte, long>(ref Unsafe.Add(ref b, 8)) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
#endif
                    Unsafe.Add(ref b, 16) = 0;
                    return;
                case 18:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
                    Unsafe.As<byte, long>(ref Unsafe.Add(ref b, 8)) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
#endif
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 16)) = 0;
                    return;
                case 19:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
                    Unsafe.As<byte, long>(ref Unsafe.Add(ref b, 8)) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
#endif
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 16)) = 0;
                    Unsafe.Add(ref b, 18) = 0;
                    return;
                case 20:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
                    Unsafe.As<byte, long>(ref Unsafe.Add(ref b, 8)) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
#endif
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 16)) = 0;
                    return;
                case 21:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
                    Unsafe.As<byte, long>(ref Unsafe.Add(ref b, 8)) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
#endif
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 16)) = 0;
                    Unsafe.Add(ref b, 20) = 0;
                    return;
                case 22:
#if BIT64
                    Unsafe.As<byte, long>(ref b) = 0;
                    Unsafe.As<byte, long>(ref Unsafe.Add(ref b, 8)) = 0;
#else
                    Unsafe.As<byte, int>(ref b) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 4)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 8)) = 0;
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 12)) = 0;
#endif
                    Unsafe.As<byte, int>(ref Unsafe.Add(ref b, 16)) = 0;
                    Unsafe.As<byte, short>(ref Unsafe.Add(ref b, 20)) = 0;
                    return;
            }

            // P/Invoke into the native version for large lengths
            if (byteLength >= 512)
                goto PInvoke;

            nuint i = 0; // byte offset at which we're clearing

            if ((Unsafe.As<byte, int>(ref b) & 3) != 0)
            {
                if ((Unsafe.As<byte, int>(ref b) & 1) != 0)
                {
                    Unsafe.AddByteOffset(ref b, i) = 0;
                    i += 1;
                    if ((Unsafe.As<byte, int>(ref b) & 2) != 0)
                        goto IntAligned;
                }
                Unsafe.As<byte, short>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
                i += 2;
            }

            IntAligned:

            // On 64-bit IntPtr.Size == 8, so we want to advance to the next 8-aligned address. If
            // (int)b % 8 is 0, 5, 6, or 7, we will already have advanced by 0, 3, 2, or 1
            // bytes to the next aligned address (respectively), so do nothing. On the other hand,
            // if it is 1, 2, 3, or 4 we will want to copy-and-advance another 4 bytes until
            // we're aligned.
            // The thing 1, 2, 3, and 4 have in common that the others don't is that if you
            // subtract one from them, their 3rd lsb will not be set. Hence, the below check.

            if (((Unsafe.As<byte, int>(ref b) - 1) & 4) == 0)
            {
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
                i += 4;
            }

            nuint end = byteLength - 16;
            byteLength -= i; // lower 4 bits of byteLength represent how many bytes are left *after* the unrolled loop

            // We know due to the above switch-case that this loop will always run 1 iteration; max
            // bytes we clear before checking is 23 (7 to align the pointers, 16 for 1 iteration) so
            // the switch handles lengths 0-22.
            Debug.Assert(end >= 7 && i <= end);

            // This is separated out into a different variable, so the i + 16 addition can be
            // performed at the start of the pipeline and the loop condition does not have
            // a dependency on the writes.
            nuint counter;

            do
            {
                counter = i + 16;

                // This loop looks very costly since there appear to be a bunch of temporary values
                // being created with the adds, but the jit (for x86 anyways) will convert each of
                // these to use memory addressing operands.

                // So the only cost is a bit of code size, which is made up for by the fact that
                // we save on writes to b.

#if BIT64
                Unsafe.As<byte, long>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
                Unsafe.As<byte, long>(ref Unsafe.AddByteOffset(ref b, i + 8)) = 0;
#else
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 4)) = 0;
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 8)) = 0;
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 12)) = 0;
#endif

                i = counter;

                // See notes above for why this wasn't used instead
                // i += 16;
            }
            while (counter <= end);
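
            // At this point fewer than 16 bytes remain (see the byteLength adjustment above); its low
            // bits tell us exactly which of the 8/4/2/1-byte tail writes below are still needed.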

            if ((byteLength & 8) != 0)
            {
#if BIT64
                Unsafe.As<byte, long>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
#else
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i + 4)) = 0;
#endif
                i += 8;
            }
            if ((byteLength & 4) != 0)
            {
                Unsafe.As<byte, int>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
                i += 4;
            }
            if ((byteLength & 2) != 0)
            {
                Unsafe.As<byte, short>(ref Unsafe.AddByteOffset(ref b, i)) = 0;
                i += 2;
            }
            if ((byteLength & 1) != 0)
            {
                Unsafe.AddByteOffset(ref b, i) = 0;
                // We're not using i after this, so not needed
                // i += 1;
            }

            return;
#endif

            PInvoke:
            RuntimeImports.RhZeroMemory(ref b, byteLength);
        }

        internal static unsafe void ClearWithReferences(ref IntPtr ip, nuint pointerSizeLength)
        {
            if (pointerSizeLength == 0)
                return;

            // TODO: Perhaps do switch casing to improve small size perf

            nuint i = 0;
            nuint n = 0;
            while ((n = i + 8) <= (pointerSizeLength))
            {
                Unsafe.AddByteOffset(ref ip, (i + 0) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 1) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 2) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 3) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 4) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 5) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 6) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 7) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                i = n;
            }
            if ((n = i + 4) <= (pointerSizeLength))
            {
                Unsafe.AddByteOffset(ref ip, (i + 0) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 1) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 2) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 3) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                i = n;
            }
            if ((n = i + 2) <= (pointerSizeLength))
            {
                Unsafe.AddByteOffset(ref ip, (i + 0) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                Unsafe.AddByteOffset(ref ip, (i + 1) * (nuint)sizeof(IntPtr)) = default(IntPtr);
                i = n;
            }
            if ((i + 1) <= (pointerSizeLength))
            {
                Unsafe.AddByteOffset(ref ip, (i + 0) * (nuint)sizeof(IntPtr)) = default(IntPtr);
            }
        }
    }
}
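
// Illustrative note (an editorial addition, not part of the original file): the internal helpers above
// are the kind of workers that Span<T>'s Clear and CopyTo members are expected to dispatch to. A
// hypothetical Clear dispatch, given a ref to the first element `p` and an element count `length`,
// might look like the following (names here are assumptions for the sketch):
//
//     if (RuntimeHelpers.IsReferenceOrContainsReferences<T>())
//         Span.ClearWithReferences(ref Unsafe.As<T, IntPtr>(ref p),
//                                  (nuint)length * (nuint)(Unsafe.SizeOf<T>() / sizeof(IntPtr)));
//     else
//         Span.ClearWithoutReferences(ref Unsafe.As<T, byte>(ref p),
//                                     (nuint)length * (nuint)Unsafe.SizeOf<T>());
//
// ClearWithReferences takes a count of pointer-sized slots rather than bytes so that every store is a
// single aligned IntPtr-sized write, which keeps object references atomic with respect to the GC.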