snappy-test.cc

// Copyright 2011 Google Inc. All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Various stubs for the unit tests for the open-source version of Snappy.

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifdef HAVE_WINDOWS_H
#include <windows.h>
#endif

#include "snappy-test.h"

#include <algorithm>

DEFINE_bool(run_microbenchmarks, true,
            "Run microbenchmarks before doing anything else.");

namespace snappy {

string ReadTestDataFile(const string& base, size_t size_limit) {
  string contents;
  const char* srcdir = getenv("srcdir");  // This is set by Automake.
  string prefix;
  if (srcdir) {
    prefix = string(srcdir) + "/";
  }
  file::GetContents(prefix + "testdata/" + base, &contents, file::Defaults()
                    ).CheckSuccess();
  if (size_limit > 0) {
    contents = contents.substr(0, size_limit);
  }
  return contents;
}

string ReadTestDataFile(const string& base) {
  return ReadTestDataFile(base, 0);
}
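
// Illustrative usage (not part of the original stubs; the file name below is
// a placeholder): a test that wants at most the first 64 KB of a corpus file
// under testdata/ would call the helper like this:
//
//   string head = ReadTestDataFile("example_corpus.txt", 65536);
//   string whole = ReadTestDataFile("example_corpus.txt");  // no size limit
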
string StringPrintf(const char* format, ...) {
  char buf[4096];
  va_list ap;
  va_start(ap, format);
  vsnprintf(buf, sizeof(buf), format, ap);
  va_end(ap);
  return buf;
}

bool benchmark_running = false;
int64 benchmark_real_time_us = 0;
int64 benchmark_cpu_time_us = 0;
string *benchmark_label = NULL;
int64 benchmark_bytes_processed = 0;

void ResetBenchmarkTiming() {
  benchmark_real_time_us = 0;
  benchmark_cpu_time_us = 0;
}

#ifdef WIN32
LARGE_INTEGER benchmark_start_real;
FILETIME benchmark_start_cpu;
#else  // WIN32
struct timeval benchmark_start_real;
struct rusage benchmark_start_cpu;
#endif  // WIN32

void StartBenchmarkTiming() {
#ifdef WIN32
  QueryPerformanceCounter(&benchmark_start_real);
  FILETIME dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_start_cpu));
#else
  gettimeofday(&benchmark_start_real, NULL);
  if (getrusage(RUSAGE_SELF, &benchmark_start_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
#endif
  benchmark_running = true;
}

void StopBenchmarkTiming() {
  if (!benchmark_running) {
    return;
  }

#ifdef WIN32
  LARGE_INTEGER benchmark_stop_real;
  LARGE_INTEGER benchmark_frequency;
  QueryPerformanceCounter(&benchmark_stop_real);
  QueryPerformanceFrequency(&benchmark_frequency);

  double elapsed_real = static_cast<double>(
      benchmark_stop_real.QuadPart - benchmark_start_real.QuadPart) /
      benchmark_frequency.QuadPart;
  benchmark_real_time_us += elapsed_real * 1e6 + 0.5;

  FILETIME benchmark_stop_cpu, dummy;
  CHECK(GetProcessTimes(
      GetCurrentProcess(), &dummy, &dummy, &dummy, &benchmark_stop_cpu));

  ULARGE_INTEGER start_ulargeint;
  start_ulargeint.LowPart = benchmark_start_cpu.dwLowDateTime;
  start_ulargeint.HighPart = benchmark_start_cpu.dwHighDateTime;

  ULARGE_INTEGER stop_ulargeint;
  stop_ulargeint.LowPart = benchmark_stop_cpu.dwLowDateTime;
  stop_ulargeint.HighPart = benchmark_stop_cpu.dwHighDateTime;

  benchmark_cpu_time_us +=
      (stop_ulargeint.QuadPart - start_ulargeint.QuadPart + 5) / 10;
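  // GetProcessTimes() reports CPU time in 100-nanosecond ticks, so dividing
  // by 10 converts the delta to microseconds and the +5 rounds to nearest.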
#else  // WIN32
  struct timeval benchmark_stop_real;
  gettimeofday(&benchmark_stop_real, NULL);
  benchmark_real_time_us +=
      1000000 * (benchmark_stop_real.tv_sec - benchmark_start_real.tv_sec);
  benchmark_real_time_us +=
      (benchmark_stop_real.tv_usec - benchmark_start_real.tv_usec);

  struct rusage benchmark_stop_cpu;
  if (getrusage(RUSAGE_SELF, &benchmark_stop_cpu) == -1) {
    perror("getrusage(RUSAGE_SELF)");
    exit(1);
  }
  benchmark_cpu_time_us += 1000000 * (benchmark_stop_cpu.ru_utime.tv_sec -
                                      benchmark_start_cpu.ru_utime.tv_sec);
  benchmark_cpu_time_us += (benchmark_stop_cpu.ru_utime.tv_usec -
                            benchmark_start_cpu.ru_utime.tv_usec);
#endif  // WIN32

  benchmark_running = false;
}

void SetBenchmarkLabel(const string& str) {
  if (benchmark_label) {
    delete benchmark_label;
  }
  benchmark_label = new string(str);
}

void SetBenchmarkBytesProcessed(int64 bytes) {
  benchmark_bytes_processed = bytes;
}
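
// Illustrative sketch (not part of the original stubs): roughly how a
// benchmark body driven by Benchmark::Run() below is expected to use the
// timing/accounting functions above. The payload, the label, and the unused
// `arg` are placeholders; real benchmarks compress or uncompress test data.
void BM_UsageSketch(int iters, int arg) {
  StopBenchmarkTiming();           // exclude per-run setup from the measurement
  string data(1024, 'x');          // placeholder payload
  SetBenchmarkBytesProcessed(static_cast<int64>(iters) * data.size());
  SetBenchmarkLabel("sketch");
  StartBenchmarkTiming();
  while (iters-- > 0) {
    // The operation being measured (e.g. compressing `data`) would go here.
  }
  StopBenchmarkTiming();
  (void)arg;  // would normally select a test case or input file
}
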
struct BenchmarkRun {
  int64 real_time_us;
  int64 cpu_time_us;
};

struct BenchmarkCompareCPUTime {
  bool operator() (const BenchmarkRun& a, const BenchmarkRun& b) const {
    return a.cpu_time_us < b.cpu_time_us;
  }
};

void Benchmark::Run() {
  for (int test_case_num = start_; test_case_num <= stop_; ++test_case_num) {
    // Run a few iterations first to find out approximately how fast
    // the benchmark is.
    const int kCalibrateIterations = 100;
    ResetBenchmarkTiming();
    StartBenchmarkTiming();
    (*function_)(kCalibrateIterations, test_case_num);
    StopBenchmarkTiming();

    // Let each test case run for about 200ms, but do at least as many
    // iterations as we used for calibration.
    // Run five times and pick the median.
    const int kNumRuns = 5;
    const int kMedianPos = kNumRuns / 2;
    int num_iterations = 0;
    if (benchmark_real_time_us > 0) {
      num_iterations = 200000 * kCalibrateIterations / benchmark_real_time_us;
    }
    num_iterations = std::max(num_iterations, kCalibrateIterations);
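    // Worked example of the calibration above: if the 100 calibration
    // iterations took 10,000 us of wall time, num_iterations becomes
    // 200000 * 100 / 10000 = 2000, so each timed run below lasts roughly
    // 200000 us (200 ms), as intended.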

    BenchmarkRun benchmark_runs[kNumRuns];

    for (int run = 0; run < kNumRuns; ++run) {
      ResetBenchmarkTiming();
      StartBenchmarkTiming();
      (*function_)(num_iterations, test_case_num);
      StopBenchmarkTiming();

      benchmark_runs[run].real_time_us = benchmark_real_time_us;
      benchmark_runs[run].cpu_time_us = benchmark_cpu_time_us;
    }

    string heading = StringPrintf("%s/%d", name_.c_str(), test_case_num);
    string human_readable_speed;

    std::nth_element(benchmark_runs,
                     benchmark_runs + kMedianPos,
                     benchmark_runs + kNumRuns,
                     BenchmarkCompareCPUTime());
    int64 real_time_us = benchmark_runs[kMedianPos].real_time_us;
    int64 cpu_time_us = benchmark_runs[kMedianPos].cpu_time_us;
    if (cpu_time_us <= 0) {
      human_readable_speed = "?";
    } else {
      int64 bytes_per_second =
          benchmark_bytes_processed * 1000000 / cpu_time_us;
      if (bytes_per_second < 1024) {
        // Cast keeps the argument consistent with the %d conversion.
        human_readable_speed =
            StringPrintf("%dB/s", static_cast<int>(bytes_per_second));
      } else if (bytes_per_second < 1024 * 1024) {
        human_readable_speed = StringPrintf(
            "%.1fkB/s", bytes_per_second / 1024.0f);
      } else if (bytes_per_second < 1024 * 1024 * 1024) {
        human_readable_speed = StringPrintf(
            "%.1fMB/s", bytes_per_second / (1024.0f * 1024.0f));
      } else {
        human_readable_speed = StringPrintf(
            "%.1fGB/s", bytes_per_second / (1024.0f * 1024.0f * 1024.0f));
      }
    }

    fprintf(stderr,
#ifdef WIN32
            "%-18s %10I64d %10I64d %10d %s %s\n",
#else
            "%-18s %10lld %10lld %10d %s %s\n",
#endif
            heading.c_str(),
            static_cast<long long>(real_time_us * 1000 / num_iterations),
            static_cast<long long>(cpu_time_us * 1000 / num_iterations),
            num_iterations,
            human_readable_speed.c_str(),
            benchmark_label ? benchmark_label->c_str() : "");
  }
}

#ifdef HAVE_LIBZ

ZLib::ZLib()
    : comp_init_(false),
      uncomp_init_(false) {
  Reinit();
}

ZLib::~ZLib() {
  if (comp_init_)   { deflateEnd(&comp_stream_); }
  if (uncomp_init_) { inflateEnd(&uncomp_stream_); }
}

void ZLib::Reinit() {
  compression_level_ = Z_DEFAULT_COMPRESSION;
  window_bits_ = MAX_WBITS;
  mem_level_ = 8;  // DEF_MEM_LEVEL
  if (comp_init_) {
    deflateEnd(&comp_stream_);
    comp_init_ = false;
  }
  if (uncomp_init_) {
    inflateEnd(&uncomp_stream_);
    uncomp_init_ = false;
  }
  first_chunk_ = true;
}

void ZLib::Reset() {
  first_chunk_ = true;
}

// --------- COMPRESS MODE

// Initialization method to be called if we hit an error while
// compressing. On hitting an error, call this method before returning
// the error.
void ZLib::CompressErrorInit() {
  deflateEnd(&comp_stream_);
  comp_init_ = false;
  Reset();
}

int ZLib::DeflateInit() {
  return deflateInit2(&comp_stream_,
                      compression_level_,
                      Z_DEFLATED,
                      window_bits_,
                      mem_level_,
                      Z_DEFAULT_STRATEGY);
}

int ZLib::CompressInit(Bytef *dest, uLongf *destLen,
                       const Bytef *source, uLong *sourceLen) {
  int err;

  comp_stream_.next_in = (Bytef*)source;
  comp_stream_.avail_in = (uInt)*sourceLen;
  if ((uLong)comp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;
  comp_stream_.next_out = dest;
  comp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)comp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (comp_init_) {      // we've already initted it
    err = deflateReset(&comp_stream_);
    if (err != Z_OK) {
      LOG(WARNING) << "ERROR: Can't reset compress object; creating a new one";
      deflateEnd(&comp_stream_);
      comp_init_ = false;
    }
  }
  if (!comp_init_) {     // first use
    comp_stream_.zalloc = (alloc_func)0;
    comp_stream_.zfree = (free_func)0;
    comp_stream_.opaque = (voidpf)0;
    err = DeflateInit();
    if (err != Z_OK) return err;
    comp_init_ = true;
  }
  return Z_OK;
}

// In a perfect world we'd always have the full buffer to compress
// when the time came, and we could just call Compress(). Alas, we
// want to do chunked compression on our webserver. In this
// application, we compress the header, send it off, then compress the
// results, send them off, then compress the footer. Thus we need to
// use the chunked compression features of zlib.
int ZLib::CompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                              const Bytef *source, uLong *sourceLen,
                              int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
  int err;

  if ( (err=CompressInit(dest, destLen, source, sourceLen)) != Z_OK )
    return err;

  // This is used to figure out how many bytes we wrote *this chunk*
  int compressed_size = comp_stream_.total_out;

  // Some setup happens only for the first chunk we compress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;
  }

  // flush_mode is Z_FINISH for all mode, Z_SYNC_FLUSH for incremental
  // compression.
  err = deflate(&comp_stream_, flush_mode);

  *sourceLen = comp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)
      && comp_stream_.avail_in == 0
      && comp_stream_.avail_out != 0 ) {
    // we processed everything ok and the output buffer was large enough.
    ;
  } else if (err == Z_STREAM_END && comp_stream_.avail_in > 0) {
    return Z_BUF_ERROR;                            // should never happen
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    CompressErrorInit();
    return err;
  } else if (comp_stream_.avail_out == 0) {        // not enough space
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_STREAM_END || err == Z_BUF_ERROR);
  if (err == Z_STREAM_END)
    err = Z_OK;

  // update the crc and other metadata
  compressed_size = comp_stream_.total_out - compressed_size;  // delta
  *destLen = compressed_size;

  return err;
}

int ZLib::CompressChunkOrAll(Bytef *dest, uLongf *destLen,
                             const Bytef *source, uLong sourceLen,
                             int flush_mode) {   // Z_FULL_FLUSH or Z_FINISH
  const int ret =
      CompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    CompressErrorInit();
  return ret;
}

// This routine only initializes the compression stream once. Thereafter, it
// just does a deflateReset on the stream, which should be faster.
int ZLib::Compress(Bytef *dest, uLongf *destLen,
                   const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=CompressChunkOrAll(dest, destLen, source, sourceLen,
                               Z_FINISH)) != Z_OK )
    return err;
  Reset();  // reset for next call to Compress

  return Z_OK;
}
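
// Illustrative sketch (not part of the original stubs): a whole-buffer round
// trip through ZLib::Compress() and ZLib::Uncompress(). The payload is a
// placeholder; compressBound() is zlib's helper for bounding the worst-case
// compressed size.
void ZLibRoundTripSketch() {
  ZLib zlib;
  const string input(1 << 16, 'a');  // placeholder payload

  uLongf compressed_len = compressBound(input.size());
  Bytef* compressed = new Bytef[compressed_len];
  CHECK(zlib.Compress(compressed, &compressed_len,
                      reinterpret_cast<const Bytef*>(input.data()),
                      input.size()) == Z_OK);

  uLongf uncompressed_len = input.size();
  Bytef* uncompressed = new Bytef[uncompressed_len];
  CHECK(zlib.Uncompress(uncompressed, &uncompressed_len,
                        compressed, compressed_len) == Z_OK);
  CHECK(uncompressed_len == input.size());

  delete[] compressed;
  delete[] uncompressed;
}
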
// --------- UNCOMPRESS MODE

int ZLib::InflateInit() {
  return inflateInit2(&uncomp_stream_, MAX_WBITS);
}

// Initialization method to be called if we hit an error while
// uncompressing. On hitting an error, call this method before
// returning the error.
void ZLib::UncompressErrorInit() {
  inflateEnd(&uncomp_stream_);
  uncomp_init_ = false;
  Reset();
}

int ZLib::UncompressInit(Bytef *dest, uLongf *destLen,
                         const Bytef *source, uLong *sourceLen) {
  int err;

  uncomp_stream_.next_in = (Bytef*)source;
  uncomp_stream_.avail_in = (uInt)*sourceLen;
  // Check for source > 64K on 16-bit machine:
  if ((uLong)uncomp_stream_.avail_in != *sourceLen) return Z_BUF_ERROR;

  uncomp_stream_.next_out = dest;
  uncomp_stream_.avail_out = (uInt)*destLen;
  if ((uLong)uncomp_stream_.avail_out != *destLen) return Z_BUF_ERROR;

  if ( !first_chunk_ )   // only need to set up stream the first time through
    return Z_OK;

  if (uncomp_init_) {    // we've already initted it
    err = inflateReset(&uncomp_stream_);
    if (err != Z_OK) {
      LOG(WARNING)
          << "ERROR: Can't reset uncompress object; creating a new one";
      UncompressErrorInit();
    }
  }
  if (!uncomp_init_) {
    uncomp_stream_.zalloc = (alloc_func)0;
    uncomp_stream_.zfree = (free_func)0;
    uncomp_stream_.opaque = (voidpf)0;
    err = InflateInit();
    if (err != Z_OK) return err;
    uncomp_init_ = true;
  }
  return Z_OK;
}

// If you compressed your data a chunk at a time, with CompressChunk,
// you can uncompress it a chunk at a time with UncompressChunk.
// The only difference between chunked and unchunked uncompression
// is the flush mode we use: Z_SYNC_FLUSH (chunked) or Z_FINISH (unchunked).
int ZLib::UncompressAtMostOrAll(Bytef *dest, uLongf *destLen,
                                const Bytef *source, uLong *sourceLen,
                                int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  int err = Z_OK;

  if ( (err=UncompressInit(dest, destLen, source, sourceLen)) != Z_OK ) {
    LOG(WARNING) << "UncompressInit: Error: " << err << " SourceLen: "
                 << *sourceLen;
    return err;
  }

  // This is used to figure out how many output bytes we wrote *this chunk*:
  const uLong old_total_out = uncomp_stream_.total_out;

  // This is used to figure out how many input bytes we read *this chunk*:
  const uLong old_total_in = uncomp_stream_.total_in;

  // Some setup happens only for the first chunk we uncompress in a run
  if ( first_chunk_ ) {
    first_chunk_ = false;  // so we don't do this again

    // For the first chunk *only* (to avoid infinite troubles), we let
    // there be no actual data to uncompress. This sometimes triggers
    // when the input is only the gzip header, say.
    if ( *sourceLen == 0 ) {
      *destLen = 0;
      return Z_OK;
    }
  }

  // We'll uncompress as much as we can. If we end OK, great; otherwise, if
  // we get an error that seems to be the gzip footer, we store the gzip
  // footer and return OK; otherwise we return the error.
  // flush_mode is Z_SYNC_FLUSH for chunked mode, Z_FINISH for all mode.
  err = inflate(&uncomp_stream_, flush_mode);

  // Figure out how many bytes of the input zlib slurped up:
  const uLong bytes_read = uncomp_stream_.total_in - old_total_in;
  CHECK_LE(source + bytes_read, source + *sourceLen);
  *sourceLen = uncomp_stream_.avail_in;

  if ((err == Z_STREAM_END || err == Z_OK)  // everything went ok
      && uncomp_stream_.avail_in == 0) {    // and we read it all
    ;
  } else if (err == Z_STREAM_END && uncomp_stream_.avail_in > 0) {
    LOG(WARNING)
        << "UncompressChunkOrAll: Received some extra data, bytes total: "
        << uncomp_stream_.avail_in << " bytes: "
        << std::string(reinterpret_cast<const char *>(uncomp_stream_.next_in),
                       std::min(int(uncomp_stream_.avail_in), 20));
    UncompressErrorInit();
    return Z_DATA_ERROR;       // what's the extra data for?
  } else if (err != Z_OK && err != Z_STREAM_END && err != Z_BUF_ERROR) {
    // an error happened
    LOG(WARNING) << "UncompressChunkOrAll: Error: " << err
                 << " avail_out: " << uncomp_stream_.avail_out;
    UncompressErrorInit();
    return err;
  } else if (uncomp_stream_.avail_out == 0) {
    err = Z_BUF_ERROR;
  }

  assert(err == Z_OK || err == Z_BUF_ERROR || err == Z_STREAM_END);
  if (err == Z_STREAM_END)
    err = Z_OK;

  *destLen = uncomp_stream_.total_out - old_total_out;  // size for this call

  return err;
}

int ZLib::UncompressChunkOrAll(Bytef *dest, uLongf *destLen,
                               const Bytef *source, uLong sourceLen,
                               int flush_mode) {  // Z_SYNC_FLUSH or Z_FINISH
  const int ret =
      UncompressAtMostOrAll(dest, destLen, source, &sourceLen, flush_mode);
  if (ret == Z_BUF_ERROR)
    UncompressErrorInit();
  return ret;
}

int ZLib::UncompressAtMost(Bytef *dest, uLongf *destLen,
                           const Bytef *source, uLong *sourceLen) {
  return UncompressAtMostOrAll(dest, destLen, source, sourceLen, Z_SYNC_FLUSH);
}

// We make sure we've uncompressed everything, that is, the current
// uncompress stream is at a compressed-buffer-EOF boundary. In gzip
// mode, we also check the gzip footer to make sure we pass the gzip
// consistency checks. We RETURN true iff both types of checks pass.
bool ZLib::UncompressChunkDone() {
  assert(!first_chunk_ && uncomp_init_);
  // Make sure we're at the end-of-compressed-data point. This means
  // if we call inflate with Z_FINISH we won't consume any input or
  // write any output
  Bytef dummyin, dummyout;
  uLongf dummylen = 0;
  if ( UncompressChunkOrAll(&dummyout, &dummylen, &dummyin, 0, Z_FINISH)
       != Z_OK ) {
    return false;
  }

  // Make sure that when we exit, we can start a new round of chunks later
  Reset();

  return true;
}
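
// Illustrative sketch (not part of the original stubs): consuming a zlib
// stream in two pieces with UncompressAtMost() and then verifying that the
// stream really ended with UncompressChunkDone(). The buffer size is a
// placeholder and the output is assumed to fit in each chunk.
void ZLibChunkedUncompressSketch(const Bytef* compressed, uLong compressed_len) {
  ZLib zlib;
  Bytef output[1 << 15];  // placeholder output buffer

  // Feed the first half of the compressed stream.
  uLong first_len = compressed_len / 2;
  uLongf out_len = sizeof(output);
  CHECK(zlib.UncompressAtMost(output, &out_len, compressed, &first_len) == Z_OK);
  // On return, first_len holds the number of input bytes *not* yet consumed.

  // Feed everything that is still unconsumed.
  uLong rest_len = compressed_len - (compressed_len / 2 - first_len);
  const Bytef* rest = compressed + (compressed_len - rest_len);
  out_len = sizeof(output);
  CHECK(zlib.UncompressAtMost(output, &out_len, rest, &rest_len) == Z_OK);

  // Verify the stream ended cleanly; this also Reset()s the object for reuse.
  CHECK(zlib.UncompressChunkDone());
}
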
// Uncompresses the source buffer into the destination buffer.
// The destination buffer must be long enough to hold the entire
// decompressed contents.
//
// We only initialize the uncomp_stream once. Thereafter, we use
// inflateReset, which should be faster.
//
// Returns Z_OK on success, otherwise, it returns a zlib error code.
int ZLib::Uncompress(Bytef *dest, uLongf *destLen,
                     const Bytef *source, uLong sourceLen) {
  int err;
  if ( (err=UncompressChunkOrAll(dest, destLen, source, sourceLen,
                                 Z_FINISH)) != Z_OK ) {
    Reset();  // let us try to uncompress again
    return err;
  }
  if ( !UncompressChunkDone() )  // calls Reset()
    return Z_DATA_ERROR;
  return Z_OK;  // stream_end is ok
}

#endif  // HAVE_LIBZ

}  // namespace snappy