+++ /dev/null
-/* Copyright 2013 Google Inc. All Rights Reserved.
-
-   Distributed under MIT license.
-   See file LICENSE for detail or copy at https://opensource.org/licenses/MIT
-*/
-
-/* Sliding window over the input data. */
-
-#ifndef BROTLI_ENC_RINGBUFFER_H_
-#define BROTLI_ENC_RINGBUFFER_H_
-
-#include <string.h>  /* memcpy */
-
-#include "../common/platform.h"
-#include <brotli/types.h>
-#include "./memory.h"
-#include "./quality.h"
-
-#if defined(__cplusplus) || defined(c_plusplus)
-extern "C" {
-#endif
-
-/* A RingBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
-   data in a circular manner: writing a byte writes it to:
-     `position() % (1 << window_bits)'.
-   For convenience, the RingBuffer array contains another copy of the
-   first `1 << tail_bits' bytes:
-     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
-   and another copy of the last two bytes:
-     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
-     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
-typedef struct RingBuffer {
-  /* Size of the ring-buffer is (1 << window_bits) + tail_size_. */
-  const uint32_t size_;
-  const uint32_t mask_;
-  const uint32_t tail_size_;
-  const uint32_t total_size_;
-
-  uint32_t cur_size_;
-  /* Position to write in the ring buffer. */
-  uint32_t pos_;
-  /* The actual ring buffer containing the copy of the last two bytes,
-     the data, and the copy of the beginning as a tail. */
-  uint8_t* data_;
-  /* The start of the ring-buffer. */
-  uint8_t* buffer_;
-} RingBuffer;
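
The mirroring invariants described in the comment above can be checked with a
toy configuration. A standalone sketch, assuming hypothetical sizes
window_bits = 4 and tail_bits = 2 (the real values come from the encoder
params; nothing here is part of the original header):

  #include <assert.h>
  #include <stdint.h>

  int main(void) {
    enum { SIZE = 1 << 4, TAIL = 1 << 2 };  /* toy window and tail */
    uint8_t storage[2 + SIZE + TAIL];
    uint8_t* buffer = storage + 2;          /* mimics buffer_ = data_ + 2 */
    int i;
    for (i = 0; i < SIZE; ++i) buffer[i] = (uint8_t)(i * 37);
    for (i = 0; i < TAIL; ++i) buffer[SIZE + i] = buffer[i];  /* head copy */
    buffer[-1] = buffer[SIZE - 1];          /* last-two-bytes copy */
    buffer[-2] = buffer[SIZE - 2];
    /* Both invariants from the RingBuffer comment now hold. */
    for (i = 0; i < TAIL; ++i) assert(buffer[i] == buffer[i + SIZE]);
    assert(buffer[-1] == buffer[SIZE - 1]);
    assert(buffer[-2] == buffer[SIZE - 2]);
    return 0;
  }
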
-
-static BROTLI_INLINE void RingBufferInit(RingBuffer* rb) {
-  rb->cur_size_ = 0;
-  rb->pos_ = 0;
-  rb->data_ = 0;
-  rb->buffer_ = 0;
-}
-
-static BROTLI_INLINE void RingBufferSetup(
-    const BrotliEncoderParams* params, RingBuffer* rb) {
-  int window_bits = ComputeRbBits(params);
-  int tail_bits = params->lgblock;
-  *(uint32_t*)&rb->size_ = 1u << window_bits;
-  *(uint32_t*)&rb->mask_ = (1u << window_bits) - 1;
-  *(uint32_t*)&rb->tail_size_ = 1u << tail_bits;
-  *(uint32_t*)&rb->total_size_ = rb->size_ + rb->tail_size_;
-}
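
The casts in RingBufferSetup strip `const' so the otherwise read-only size
fields can be written exactly once; after setup they never change. To make
the arithmetic concrete, a standalone sketch with hypothetical inputs
window_bits = 22 and tail_bits = 16 (the real values come from
ComputeRbBits(params) and params->lgblock):

  #include <stdint.h>
  #include <stdio.h>

  int main(void) {
    int window_bits = 22;                     /* hypothetical */
    int tail_bits = 16;                       /* hypothetical */
    uint32_t size = 1u << window_bits;        /* 4 MiB data region */
    uint32_t mask = (1u << window_bits) - 1;  /* wraps pos_ into the region */
    uint32_t tail_size = 1u << tail_bits;     /* 64 KiB mirrored head */
    uint32_t total_size = size + tail_size;
    printf("size=%u mask=%#x tail_size=%u total_size=%u\n",
           size, mask, tail_size, total_size);
    return 0;
  }
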
-
-static BROTLI_INLINE void RingBufferFree(MemoryManager* m, RingBuffer* rb) {
-  BROTLI_FREE(m, rb->data_);
-}
-
-/* Allocates or re-allocates data_ to the given length plus some slack
-   region before and after. Fills the slack regions with zeros. */
-static BROTLI_INLINE void RingBufferInitBuffer(
-    MemoryManager* m, const uint32_t buflen, RingBuffer* rb) {
-  static const size_t kSlackForEightByteHashingEverywhere = 7;
-  uint8_t* new_data = BROTLI_ALLOC(
-      m, uint8_t, 2 + buflen + kSlackForEightByteHashingEverywhere);
-  size_t i;
-  if (BROTLI_IS_OOM(m)) return;
-  if (rb->data_) {
-    memcpy(new_data, rb->data_,
-        2 + rb->cur_size_ + kSlackForEightByteHashingEverywhere);
-    BROTLI_FREE(m, rb->data_);
-  }
-  rb->data_ = new_data;
-  rb->cur_size_ = buflen;
-  rb->buffer_ = rb->data_ + 2;
-  rb->buffer_[-2] = rb->buffer_[-1] = 0;
-  for (i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {
-    rb->buffer_[rb->cur_size_ + i] = 0;
-  }
-}
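
The 7 bytes of slack let a hasher load 8 bytes starting at any position
below cur_size_ without reading past the allocation: the worst case, a load
at cur_size_ - 1, ends on the last zeroed slack byte. The encoder's hashers
use BROTLI_UNALIGNED_LOAD64LE from platform.h for this; a minimal portable
sketch of the same idea, with hypothetical sizes:

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Reads 8 bytes at p; memcpy is the portable unaligned load. */
  static uint64_t Load64(const uint8_t* p) {
    uint64_t v;
    memcpy(&v, p, sizeof(v));
    return v;
  }

  int main(void) {
    enum { CUR_SIZE = 16, SLACK = 7 };         /* hypothetical */
    uint8_t data[2 + CUR_SIZE + SLACK] = {0};  /* mimics data_'s layout */
    uint8_t* buffer = data + 2;
    /* Even a load at the last valid position stays inside the allocation. */
    printf("%llu\n", (unsigned long long)Load64(&buffer[CUR_SIZE - 1]));
    return 0;
  }
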
-
-static BROTLI_INLINE void RingBufferWriteTail(
-    const uint8_t* bytes, size_t n, RingBuffer* rb) {
-  const size_t masked_pos = rb->pos_ & rb->mask_;
-  if (BROTLI_PREDICT_FALSE(masked_pos < rb->tail_size_)) {
-    /* Just fill the tail buffer with the beginning data. */
-    const size_t p = rb->size_ + masked_pos;
-    memcpy(&rb->buffer_[p], bytes,
-        BROTLI_MIN(size_t, n, rb->tail_size_ - masked_pos));
-  }
-}
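
The mirrored tail pays off during match finding: a backward match that
starts near the end of the data region can be compared straight through the
boundary without a per-byte wrap check. A hypothetical comparison loop in
that style (not a function from this header; callers would bound max_len so
that indices stay within the tail):

  #include <stddef.h>
  #include <stdint.h>

  size_t MatchLength(const uint8_t* buffer, size_t a, size_t b,
                     size_t max_len) {
    size_t i = 0;
    while (i < max_len && buffer[a + i] == buffer[b + i]) ++i;  /* no mask */
    return i;
  }
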
-
-/* Push bytes into the ring buffer. */
-static BROTLI_INLINE void RingBufferWrite(
-    MemoryManager* m, const uint8_t* bytes, size_t n, RingBuffer* rb) {
-  if (rb->pos_ == 0 && n < rb->tail_size_) {
-    /* Special case for the first write: to process the first block, we don't
-       need to allocate the whole ring-buffer and we don't need the tail
-       either. However, we do this memory usage optimization only if the
-       first write is less than the tail size, which is also the input block
-       size, otherwise it is likely that other blocks will follow and we
-       will need to reallocate to the full size anyway. */
-    rb->pos_ = (uint32_t)n;
-    RingBufferInitBuffer(m, rb->pos_, rb);
-    if (BROTLI_IS_OOM(m)) return;
-    memcpy(rb->buffer_, bytes, n);
-    return;
-  }
-  if (rb->cur_size_ < rb->total_size_) {
-    /* Lazily allocate the full buffer. */
-    RingBufferInitBuffer(m, rb->total_size_, rb);
-    if (BROTLI_IS_OOM(m)) return;
-    /* Initialize the last two bytes to zero, so that we don't have to worry
-       later when we copy the last two bytes to the first two positions. */
-    rb->buffer_[rb->size_ - 2] = 0;
-    rb->buffer_[rb->size_ - 1] = 0;
-  }
-  {
-    const size_t masked_pos = rb->pos_ & rb->mask_;
-    /* The length of the writes is limited so that we do not need to worry
-       about a write wrapping around the ring buffer more than once. */
-    RingBufferWriteTail(bytes, n, rb);
-    if (BROTLI_PREDICT_TRUE(masked_pos + n <= rb->size_)) {
-      /* A single write fits. */
-      memcpy(&rb->buffer_[masked_pos], bytes, n);
-    } else {
-      /* Split into two writes.
-         Copy into the end of the buffer, including the tail buffer. */
-      memcpy(&rb->buffer_[masked_pos], bytes,
-          BROTLI_MIN(size_t, n, rb->total_size_ - masked_pos));
-      /* Copy into the beginning of the buffer */
-      memcpy(&rb->buffer_[0], bytes + (rb->size_ - masked_pos),
-          n - (rb->size_ - masked_pos));
-    }
-  }
-  {
-    BROTLI_BOOL not_first_lap = (rb->pos_ & (1u << 31)) != 0;
-    uint32_t rb_pos_mask = (1u << 31) - 1;
-    rb->buffer_[-2] = rb->buffer_[rb->size_ - 2];
-    rb->buffer_[-1] = rb->buffer_[rb->size_ - 1];
-    rb->pos_ = (rb->pos_ & rb_pos_mask) + (uint32_t)(n & rb_pos_mask);
-    if (not_first_lap) {
-      /* Wrap, but preserve not-a-first-lap feature. */
-      rb->pos_ |= 1u << 31;
-    }
-  }
-}
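
Taken together, a typical lifecycle looks roughly like the sketch below. It
is hedged: it assumes this header plus memory.h, the Example name is
illustrative, and the zero alloc/free/opaque arguments select the default
allocator.

  #include "./memory.h"
  #include "./ringbuffer.h"

  static void Example(const BrotliEncoderParams* params,
                      const uint8_t* input, size_t input_size) {
    MemoryManager m;
    RingBuffer rb;
    BrotliInitMemoryManager(&m, 0, 0, 0);  /* default malloc/free */
    RingBufferInit(&rb);
    RingBufferSetup(params, &rb);          /* sizes fixed from params */
    RingBufferWrite(&m, input, input_size, &rb);
    if (!BROTLI_IS_OOM(&m)) {
      /* buffer_ now holds the input's sliding window; pos_ & mask_ is the
         next write position. */
    }
    RingBufferFree(&m, &rb);
  }
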
-
-#if defined(__cplusplus) || defined(c_plusplus)
-}  /* extern "C" */
-#endif
-
-#endif  /* BROTLI_ENC_RINGBUFFER_H_ */