author     skanev <skanev@google.com>            2017-02-01 08:34:26 -0800
committer  Victor Costan <pwnall@chromium.org>   2017-03-08 11:42:21 -0800
commit     d3c6d20d0a882e1220c9f2a9a8867531971b7841 (patch)
tree       24638e5e13fc3a02c15b83d171c30a7336232bb4
parent     626e1b9faa81e6f744b8b101ab0cf94c5927d15a (diff)
Add compression size reporting hooks.
Also, force inlining util::compression::Sample(). The inlining change is
necessary. Without it, even with FDO+LIPO, the call doesn't get inlined and
uses 4 registers to construct parameters (which won't be used in the common
case). In some of the more compute-bound tests that causes extra spills and
significant overhead, even if the call is sufficiently long.

For example, with inlining:
BM_UFlat/0    32.7µs ± 1%    33.1µs ± 1%    +1.41%
without:
BM_UFlat/0    32.7µs ± 1%    37.7µs ± 1%    +15.29%
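The inlining fix itself lives in Google-internal code (util::compression::Sample() is not part of this repository), but as a rough sketch of the technique being described: a small sampling predicate can be forced inline with the usual GCC/Clang attribute so the fast path never has to materialize the reporting arguments. The macro and the trivial function body below are illustrative, not snappy's.

#if defined(__GNUC__) || defined(__clang__)
#define SAMPLE_ALWAYS_INLINE inline __attribute__((always_inline))
#else
#define SAMPLE_ALWAYS_INLINE inline
#endif

// Hypothetical sampling predicate: once inlined, the common "return false"
// path lets the caller skip constructing the reporting parameters entirely.
SAMPLE_ALWAYS_INLINE bool Sample() { return false; }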
-rw-r--r--  snappy.cc | 22
1 file changed, 18 insertions(+), 4 deletions(-)
diff --git a/snappy.cc b/snappy.cc
index 956db1a..bf82b20 100644
--- a/snappy.cc
+++ b/snappy.cc
@@ -527,6 +527,10 @@ char* CompressFragment(const char* input,
}
} // end namespace internal
+// Called back at every compression call to trace parameters and sizes.
+static inline void Report(const char *algorithm, size_t compressed_size,
+ size_t uncompressed_size) {}
+
// Signature of output types needed by decompression code.
// The decompression code is templatized on a type that obeys this
// signature so that we do not pay virtual function call overhead in
@@ -786,13 +790,18 @@ static bool InternalUncompress(Source* r, Writer* writer) {
SnappyDecompressor decompressor(r);
uint32 uncompressed_len = 0;
if (!decompressor.ReadUncompressedLength(&uncompressed_len)) return false;
- return InternalUncompressAllTags(&decompressor, writer, uncompressed_len);
+
+ return InternalUncompressAllTags(&decompressor, writer, r->Available(),
+ uncompressed_len);
}
template <typename Writer>
static bool InternalUncompressAllTags(SnappyDecompressor* decompressor,
Writer* writer,
+ uint32 compressed_len,
uint32 uncompressed_len) {
+ Report("snappy_uncompress", compressed_len, uncompressed_len);
+
writer->SetExpectedLength(uncompressed_len);
// Process the entire input
@@ -809,6 +818,7 @@ bool GetUncompressedLength(Source* source, uint32* result) {
size_t Compress(Source* reader, Sink* writer) {
size_t written = 0;
size_t N = reader->Available();
+ const size_t uncompressed_size = N;
char ulength[Varint::kMax32];
char* p = Varint::Encode32(ulength, N);
writer->Append(ulength, p-ulength);
@@ -881,6 +891,8 @@ size_t Compress(Source* reader, Sink* writer) {
reader->Skip(pending_advance);
}
+ Report("snappy_compress", written, uncompressed_size);
+
delete[] scratch;
delete[] scratch_output;
@@ -1446,18 +1458,20 @@ bool Uncompress(Source* compressed, Sink* uncompressed) {
char* buf = uncompressed->GetAppendBufferVariable(
1, uncompressed_len, &c, 1, &allocated_size);
+ const size_t compressed_len = compressed->Available();
// If we can get a flat buffer, then use it, otherwise do block by block
// uncompression
if (allocated_size >= uncompressed_len) {
SnappyArrayWriter writer(buf);
- bool result = InternalUncompressAllTags(
- &decompressor, &writer, uncompressed_len);
+ bool result = InternalUncompressAllTags(&decompressor, &writer,
+ compressed_len, uncompressed_len);
uncompressed->Append(buf, writer.Produced());
return result;
} else {
SnappySinkAllocator allocator(uncompressed);
SnappyScatteredWriter<SnappySinkAllocator> writer(allocator);
- return InternalUncompressAllTags(&decompressor, &writer, uncompressed_len);
+ return InternalUncompressAllTags(&decompressor, &writer, compressed_len,
+ uncompressed_len);
}
}
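Upstream snappy leaves Report() as the empty inline stub added in this patch; the hook exists so a build can substitute its own body to trace compression ratios. Purely as an illustration of what such a substitution might look like (not part of this patch), a logging variant could be:

#include <cstddef>
#include <cstdio>

static inline void Report(const char* algorithm, size_t compressed_size,
                          size_t uncompressed_size) {
  // Same signature as the stub above: the algorithm name plus the compressed
  // and uncompressed byte counts, as passed from Compress() and
  // InternalUncompressAllTags().
  double pct = uncompressed_size
                   ? 100.0 * compressed_size / uncompressed_size
                   : 0.0;
  std::fprintf(stderr, "%s: %zu -> %zu bytes (%.2f%% of original)\n",
               algorithm, uncompressed_size, compressed_size, pct);
}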