/* Copyright 2013 Google Inc. All Rights Reserved.
   Distributed under MIT license.
   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
*/

/* Sliding window over the input data. */

#ifndef BROTLI_ENC_RINGBUFFER_H_
#define BROTLI_ENC_RINGBUFFER_H_

#include <string.h>  /* memcpy */

#include "../common/types.h"
#include "./memory.h"
#include "./port.h"

#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif

/* A RingBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to
     `position() % (1 << window_bits)'.
   For convenience, the RingBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
typedef struct RingBuffer {
  /* Size of the ringbuffer is (1 << window_bits) + tail_size_. */
  const uint32_t size_;
  const uint32_t mask_;
  const uint32_t tail_size_;
  const uint32_t total_size_;

  uint32_t cur_size_;
  /* Position to write in the ring buffer. */
  uint32_t pos_;
  /* The actual ring buffer containing the copy of the last two bytes,
     the data, and the copy of the beginning as a tail. */
  uint8_t *data_;
  /* The start of the ringbuffer. */
  uint8_t *buffer_;
} RingBuffer;

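/* Illustration (not part of the original header; the parameter values are
   hypothetical): with window_bits = 4 and tail_bits = 2, RingBufferSetup()
   below yields size_ = 16, mask_ = 15, tail_size_ = 4, total_size_ = 20, and
   the invariants described above become

     buffer_[i]  == buffer_[i + 16]   for i in [0, 4)   // tail mirror
     buffer_[-1] == buffer_[15]                         // last-byte mirror
     buffer_[-2] == buffer_[14]

   so a read of up to tail_size_ bytes starting anywhere in [0, size_) never
   has to wrap, and two-byte look-behind at position 0 needs no special case
   at the window boundary. */
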
static BROTLI_INLINE void RingBufferInit(RingBuffer* rb) {
  rb->cur_size_ = 0;
  rb->pos_ = 0;
  rb->data_ = 0;
  rb->buffer_ = 0;
}

static BROTLI_INLINE void RingBufferSetup(
    int window_bits, int tail_bits, RingBuffer* rb) {
  *(uint32_t*)&rb->size_ = 1u << window_bits;
  *(uint32_t*)&rb->mask_ = (1u << window_bits) - 1;
  *(uint32_t*)&rb->tail_size_ = 1u << tail_bits;
  *(uint32_t*)&rb->total_size_ = rb->size_ + rb->tail_size_;
}

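/* Setup sketch (illustrative; the argument values are assumptions, not taken
   from this file): a caller would typically run the two calls back to back
   before the first write, e.g.

     RingBuffer rb;
     RingBufferInit(&rb);
     RingBufferSetup(22, 13, &rb);  // e.g. window_bits from the encoder's
                                    // window setting, tail_bits from the
                                    // input block size setting

   RingBufferSetup() only fills in the (conceptually const) size fields by
   casting away const; no memory is allocated until the first
   RingBufferWrite() triggers RingBufferInitBuffer(). */
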
static BROTLI_INLINE void RingBufferFree(MemoryManager* m, RingBuffer* rb) {
  BROTLI_FREE(m, rb->data_);
}

/* Allocates or re-allocates data_ to the given length plus some slack
   region before and after. Fills the slack regions with zeros. */
static BROTLI_INLINE void RingBufferInitBuffer(
    MemoryManager* m, const uint32_t buflen, RingBuffer* rb) {
  static const size_t kSlackForEightByteHashingEverywhere = 7;
  uint8_t* new_data = BROTLI_ALLOC(
      m, uint8_t, 2 + buflen + kSlackForEightByteHashingEverywhere);
  size_t i;
  if (BROTLI_IS_OOM(m)) return;
  if (rb->data_) {
    memcpy(new_data, rb->data_,
        2 + rb->cur_size_ + kSlackForEightByteHashingEverywhere);
    BROTLI_FREE(m, rb->data_);
  }
  rb->data_ = new_data;
  rb->cur_size_ = buflen;
  rb->buffer_ = rb->data_ + 2;
  rb->buffer_[-2] = rb->buffer_[-1] = 0;
  for (i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {
    rb->buffer_[rb->cur_size_ + i] = 0;
  }
}

static BROTLI_INLINE void RingBufferWriteTail(
    const uint8_t *bytes, size_t n, RingBuffer* rb) {
  const size_t masked_pos = rb->pos_ & rb->mask_;
  if (PREDICT_FALSE(masked_pos < rb->tail_size_)) {
    /* Just fill the tail buffer with the beginning data. */
    const size_t p = rb->size_ + masked_pos;
    memcpy(&rb->buffer_[p], bytes,
        BROTLI_MIN(size_t, n, rb->tail_size_ - masked_pos));
  }
}

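/* Worked example (hypothetical numbers, matching the struct comment above):
   with size_ = 16 and tail_size_ = 4, a write of n = 2 bytes at
   masked_pos = 1 stores them at buffer_[1..2] (done by the caller below) and,
   here, at buffer_[17..18], so a read that starts near the end of the window
   and runs past size_ still sees the wrapped bytes without any extra bounds
   logic. When masked_pos >= tail_size_, this function is a no-op. */
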
/* Push bytes into the ring buffer. */
static BROTLI_INLINE void RingBufferWrite(
    MemoryManager* m, const uint8_t *bytes, size_t n, RingBuffer* rb) {
  if (rb->pos_ == 0 && n < rb->tail_size_) {
    /* Special case for the first write: to process the first block, we don't
       need to allocate the whole ringbuffer and we don't need the tail
       either. However, we do this memory usage optimization only if the
       first write is less than the tail size, which is also the input block
       size, otherwise it is likely that other blocks will follow and we
       will need to reallocate to the full size anyway. */
    rb->pos_ = (uint32_t)n;
    RingBufferInitBuffer(m, rb->pos_, rb);
    if (BROTLI_IS_OOM(m)) return;
    memcpy(rb->buffer_, bytes, n);
    return;
  }
  if (rb->cur_size_ < rb->total_size_) {
    /* Lazily allocate the full buffer. */
    RingBufferInitBuffer(m, rb->total_size_, rb);
    if (BROTLI_IS_OOM(m)) return;
    /* Initialize the last two bytes to zero, so that we don't have to worry
       later when we copy the last two bytes to the first two positions. */
    rb->buffer_[rb->size_ - 2] = 0;
    rb->buffer_[rb->size_ - 1] = 0;
  }
  {
    const size_t masked_pos = rb->pos_ & rb->mask_;
    /* The length of the writes is limited so that we do not need to worry
       about a write wrapping around the ring buffer more than once. */
    RingBufferWriteTail(bytes, n, rb);
    if (PREDICT_TRUE(masked_pos + n <= rb->size_)) {
      /* A single write fits. */
      memcpy(&rb->buffer_[masked_pos], bytes, n);
    } else {
      /* Split into two writes.
         Copy into the end of the buffer, including the tail buffer. */
      memcpy(&rb->buffer_[masked_pos], bytes,
          BROTLI_MIN(size_t, n, rb->total_size_ - masked_pos));
      /* Copy into the beginning of the buffer. */
      memcpy(&rb->buffer_[0], bytes + (rb->size_ - masked_pos),
          n - (rb->size_ - masked_pos));
    }
  }
  rb->buffer_[-2] = rb->buffer_[rb->size_ - 2];
  rb->buffer_[-1] = rb->buffer_[rb->size_ - 1];
  rb->pos_ += (uint32_t)n;
  if (rb->pos_ > (1u << 30)) {
    /* Wrap, but preserve not-a-first-lap feature. */
    rb->pos_ = (rb->pos_ & ((1u << 30) - 1)) | (1u << 30);
  }
}

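/* End-to-end usage sketch (illustrative only; `m`, `block`, `block_len` and
   `has_more_input` are hypothetical names, and the MemoryManager is assumed
   to be initialized elsewhere via ./memory.h):

     RingBuffer rb;
     RingBufferInit(&rb);
     RingBufferSetup(window_bits, tail_bits, &rb);
     while (has_more_input) {
       RingBufferWrite(m, block, block_len, &rb);
       if (BROTLI_IS_OOM(m)) break;
       // ... hash and find matches against rb.buffer_ ...
     }
     RingBufferFree(m, &rb);

   After a write, the most recently written byte sits at
   rb.buffer_[(rb.pos_ - 1) & rb.mask_]. The wrap at the end of
   RingBufferWrite() keeps bit 30 of rb.pos_ set once that much data has been
   pushed, so callers can still tell that the buffer is past its first lap. */
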
#if defined(__cplusplus) || defined(c_plusplus)
}  /* extern "C" */
#endif

#endif  /* BROTLI_ENC_RINGBUFFER_H_ */