author    Stephen Toub <stoub@microsoft.com>  2019-01-17 16:00:12 -0500
committer GitHub <noreply@github.com>         2019-01-17 16:00:12 -0500
commit    c6b1b60f5f7cbcedab5d25e2ea3ac669885dc5fd (patch)
tree      7fe93b0df7cdd36b7276496045d700db39d62c60
parent    5849f647616695556aff9978252084d83959e2b1 (diff)
Avoid allocations in Utf8Formatter.TryFormat(float/double, ...) (#22011)
* Avoid allocations in Utf8Formatter.TryFormat(float/double, ...)

  Currently every call to Utf8Formatter.TryFormat for a float or a double
  allocates two strings: one for the format string and one for the resulting
  number. This change avoids both in the common case, where float/double.TryFormat
  can format directly into a stack buffer. It also removes some unsafe code;
  the performance of StandardFormat.ToString isn't currently worth the extra
  nanoseconds gained by using unsafe code.

* Delete assert

  There's a test that uses unsafe code to cause this assert to fail, validating
  that no exception is thrown.
-rw-r--r--  src/System.Private.CoreLib/shared/System/Buffers/StandardFormat.cs                          | 68
-rw-r--r--  src/System.Private.CoreLib/shared/System/Buffers/Text/Utf8Formatter/Utf8Formatter.Float.cs  | 54
2 files changed, 81 insertions(+), 41 deletions(-)
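
For context, a minimal sketch (not part of this commit) of the call this change optimizes. Utf8Formatter.TryFormat and StandardFormat are the real APIs patched here; the buffer size and values are illustrative assumptions. After this change, the common path below performs no heap allocations:

    using System;
    using System.Buffers;
    using System.Buffers.Text;
    using System.Text;

    class Demo
    {
        static void Main()
        {
            // Format a double directly as UTF-8 bytes. Previously this call
            // allocated two strings (the format string and the number text);
            // after this patch the common case formats into a stack buffer.
            Span<byte> utf8 = stackalloc byte[32];

            if (Utf8Formatter.TryFormat(123.456, utf8, out int bytesWritten, new StandardFormat('F', 2)))
            {
                Console.WriteLine(Encoding.ASCII.GetString(utf8.Slice(0, bytesWritten))); // "123.46"
            }
        }
    }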
diff --git a/src/System.Private.CoreLib/shared/System/Buffers/StandardFormat.cs b/src/System.Private.CoreLib/shared/System/Buffers/StandardFormat.cs
index 62ab2f362c..83014d06b0 100644
--- a/src/System.Private.CoreLib/shared/System/Buffers/StandardFormat.cs
+++ b/src/System.Private.CoreLib/shared/System/Buffers/StandardFormat.cs
@@ -146,40 +146,54 @@ namespace System.Buffers
/// </summary>
public override string ToString()
{
- unsafe
+ Span<char> buffer = stackalloc char[FormatStringLength];
+ int charsWritten = Format(buffer);
+ return new string(buffer.Slice(0, charsWritten));
+ }
+
+ /// <summary>The exact buffer length required by <see cref="Format"/>.</summary>
+ internal const int FormatStringLength = 3;
+
+ /// <summary>
+ /// Formats the format in classic .NET format.
+ /// </summary>
+ internal int Format(Span<char> destination)
+ {
+ Debug.Assert(destination.Length == FormatStringLength);
+
+ int count = 0;
+ char symbol = Symbol;
+
+ if (symbol != default &&
+ (uint)destination.Length == FormatStringLength) // to eliminate bounds checks
{
- const int MaxLength = 4;
- char* pBuffer = stackalloc char[MaxLength];
+ destination[0] = symbol;
+ count = 1;
- int dstIndex = 0;
- char symbol = Symbol;
- if (symbol != default)
+ uint precision = Precision;
+ if (precision != NoPrecision)
{
- pBuffer[dstIndex++] = symbol;
-
- byte precision = Precision;
- if (precision != NoPrecision)
+ // Note that Precision is stored as a byte, so in theory it could contain
+ // values > MaxPrecision (99). But all supported mechanisms for creating a
+ // StandardFormat limit values to being <= MaxPrecision, so the only way a value
+ // could be larger than that is if unsafe code or the equivalent were used
+ // to force a larger invalid value in, in which case we don't need to
+ // guarantee such an invalid value is properly roundtripped through here;
+ // we just need to make sure things aren't corrupted further.
+
+ if (precision >= 10)
{
- if (precision >= 100)
- {
- pBuffer[dstIndex++] = (char)('0' + (precision / 100) % 10);
- precision = (byte)(precision % 100);
- }
-
- if (precision >= 10)
- {
- pBuffer[dstIndex++] = (char)('0' + (precision / 10) % 10);
- precision = (byte)(precision % 10);
- }
-
- pBuffer[dstIndex++] = (char)('0' + precision);
+ uint div = Math.DivRem(precision, 10, out precision);
+ destination[1] = (char)('0' + div % 10);
+ count = 2;
}
- }
- Debug.Assert(dstIndex <= MaxLength);
-
- return new string(pBuffer, startIndex: 0, length: dstIndex);
+ destination[count] = (char)('0' + precision);
+ count++;
+ }
}
+
+ return count;
}
/// <summary>
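
To follow the digit-writing logic above in isolation, here is a self-contained sketch, with hypothetical names and plain division standing in for the DivRem call, of how a symbol plus a 0-99 precision becomes at most three chars:

    using System;

    static class StandardFormatSketch
    {
        private const byte NoPrecision = byte.MaxValue; // mirrors StandardFormat.NoPrecision

        // Writes e.g. ('D', 2) as "D2", or ('G', NoPrecision) as just "G".
        private static int Format(char symbol, byte precisionByte, Span<char> destination)
        {
            int count = 0;
            if (symbol != default)
            {
                destination[0] = symbol;
                count = 1;

                uint precision = precisionByte;
                if (precision != NoPrecision)
                {
                    if (precision >= 10)
                    {
                        uint div = precision / 10;
                        destination[1] = (char)('0' + div % 10); // % 10 keeps invalid values > 99 from corrupting the char
                        count = 2;
                        precision -= div * 10;
                    }

                    destination[count] = (char)('0' + precision);
                    count++;
                }
            }
            return count;
        }

        static void Main()
        {
            Span<char> buffer = stackalloc char[3]; // FormatStringLength: one symbol plus at most two digits
            int written = Format('D', 2, buffer);
            Console.WriteLine(new string(buffer.Slice(0, written))); // "D2"
        }
    }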
diff --git a/src/System.Private.CoreLib/shared/System/Buffers/Text/Utf8Formatter/Utf8Formatter.Float.cs b/src/System.Private.CoreLib/shared/System/Buffers/Text/Utf8Formatter/Utf8Formatter.Float.cs
index a32dd9d66a..e546a444aa 100644
--- a/src/System.Private.CoreLib/shared/System/Buffers/Text/Utf8Formatter/Utf8Formatter.Float.cs
+++ b/src/System.Private.CoreLib/shared/System/Buffers/Text/Utf8Formatter/Utf8Formatter.Float.cs
@@ -59,13 +59,7 @@ namespace System.Buffers.Text
return TryFormatFloatingPoint<float>(value, destination, out bytesWritten, format);
}
- //
- // Common handler for TryFormat(Double) and TryFormat(Single). You may notice that this particular routine isn't getting into the "no allocation" spirit
- // of things. The DoubleToNumber() code is incredibly complex and is one of the few pieces of Number formatting never C#-ized. It would be really
- // be preferable not to have another version of that lying around. Until we really hit a scenario where floating point formatting needs the perf, we'll
- // make do with this.
- //
- private static bool TryFormatFloatingPoint<T>(T value, Span<byte> destination, out int bytesWritten, StandardFormat format) where T : IFormattable
+ private static bool TryFormatFloatingPoint<T>(T value, Span<byte> destination, out int bytesWritten, StandardFormat format) where T : IFormattable, ISpanFormattable
{
if (format.IsDefault)
{
@@ -89,23 +83,55 @@ namespace System.Buffers.Text
default:
return FormattingHelpers.TryFormatThrowFormatException(out bytesWritten);
}
+
+ Span<char> formatText = stackalloc char[StandardFormat.FormatStringLength];
+ int formattedLength = format.Format(formatText);
+ formatText = formatText.Slice(0, formattedLength);
- string formatString = format.ToString();
- string utf16Text = value.ToString(formatString, CultureInfo.InvariantCulture);
- int length = utf16Text.Length;
- if (length > destination.Length)
+ // We first try to format into a stack-allocated buffer, and if it succeeds, we can avoid
+ // all allocation. If that fails, we fall back to allocating strings. If it proves impactful,
+ // that allocation (as well as roundtripping from byte to char and back to byte) could be avoided by
+ // calling into a refactored Number.FormatSingle/Double directly.
+
+ const int StackBufferLength = 128; // large enough to handle the majority of cases
+ Span<char> stackBuffer = stackalloc char[StackBufferLength];
+ ReadOnlySpan<char> utf16Text = stackalloc char[0];
+
+ // Try to format into the stack buffer. If we're successful, we can avoid all allocations.
+ if (value.TryFormat(stackBuffer, out formattedLength, formatText, CultureInfo.InvariantCulture))
+ {
+ utf16Text = stackBuffer.Slice(0, formattedLength);
+ }
+ else
+ {
+ // The stack buffer wasn't large enough. If the destination buffer isn't at least as
+ // big as the stack buffer, we know the whole operation will eventually fail, so we
+ // can just fail now.
+ if (destination.Length <= StackBufferLength)
+ {
+ bytesWritten = 0;
+ return false;
+ }
+
+ // Fall back to using a string format and allocating a string for the resulting formatted value.
+ utf16Text = value.ToString(new string(formatText), CultureInfo.InvariantCulture);
+ }
+
+ // Copy the value to the destination, if it's large enough.
+
+ if (utf16Text.Length > destination.Length)
{
bytesWritten = 0;
return false;
}
- for (int i = 0; i < length; i++)
+ for (int i = 0; i < utf16Text.Length; i++)
{
Debug.Assert(utf16Text[i] < 128, "A culture-invariant ToString() of a floating point expected to produce ASCII characters only.");
- destination[i] = (byte)(utf16Text[i]);
+ destination[i] = (byte)utf16Text[i];
}
- bytesWritten = length;
+ bytesWritten = utf16Text.Length;
return true;
}
}
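
Reduced to a standalone sketch, the flow of the new TryFormatFloatingPoint looks like the following. TryFormatToUtf8 is a hypothetical helper (the real method is generic, using the ISpanFormattable constraint shown in the hunk above), but the stack-buffer-first, allocate-only-on-overflow flow and the early size check mirror the change:

    using System;
    using System.Globalization;
    using System.Text;

    static class FloatFormatSketch
    {
        private static bool TryFormatToUtf8(double value, string format, Span<byte> destination, out int bytesWritten)
        {
            const int StackBufferLength = 128; // covers the vast majority of formatted values
            Span<char> stackBuffer = stackalloc char[StackBufferLength];
            ReadOnlySpan<char> utf16Text;

            if (value.TryFormat(stackBuffer, out int charsWritten, format, CultureInfo.InvariantCulture))
            {
                utf16Text = stackBuffer.Slice(0, charsWritten); // common path: no allocation
            }
            else if (destination.Length <= StackBufferLength)
            {
                bytesWritten = 0; // the text needs > 128 chars, so this destination can never hold it
                return false;
            }
            else
            {
                utf16Text = value.ToString(format, CultureInfo.InvariantCulture); // rare fallback: allocate
            }

            if (utf16Text.Length > destination.Length)
            {
                bytesWritten = 0;
                return false;
            }

            // Invariant-culture float/double text is pure ASCII, so a narrowing cast per char suffices.
            for (int i = 0; i < utf16Text.Length; i++)
            {
                destination[i] = (byte)utf16Text[i];
            }

            bytesWritten = utf16Text.Length;
            return true;
        }

        static void Main()
        {
            Span<byte> utf8 = stackalloc byte[32];
            if (TryFormatToUtf8(123.456, "F2", utf8, out int written))
            {
                Console.WriteLine(Encoding.ASCII.GetString(utf8.Slice(0, written))); // "123.46"
            }
        }
    }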