]>
Commit | Line | Data |
---|---|---|
11b7501a SB |
1 | /* Copyright 2013 Google Inc. All Rights Reserved.\r |
2 | \r | |
3 | Distributed under MIT license.\r | |
4 | See file LICENSE for detail or copy at https://opensource.org/licenses/MIT\r | |
5 | */\r | |
6 | \r | |
7 | /* Sliding window over the input data. */\r | |
8 | \r | |
9 | #ifndef BROTLI_ENC_RINGBUFFER_H_\r | |
10 | #define BROTLI_ENC_RINGBUFFER_H_\r | |
11 | \r | |
12 | #include <string.h> /* memcpy */\r | |
13 | \r | |
dd4f667e LG |
14 | #include "../common/platform.h"\r |
15 | #include <brotli/types.h>\r | |
11b7501a | 16 | #include "./memory.h"\r |
11b7501a SB |
17 | #include "./quality.h"\r |
18 | \r | |
19 | #if defined(__cplusplus) || defined(c_plusplus)\r | |
20 | extern "C" {\r | |
21 | #endif\r | |
22 | \r | |
/* A RingBuffer(window_bits, tail_bits) contains `1 << window_bits' bytes of
   data in a circular manner: writing a byte writes it to:
     `position() % (1 << window_bits)'.
   For convenience, the RingBuffer array contains another copy of the
   first `1 << tail_bits' bytes:
     buffer_[i] == buffer_[i + (1 << window_bits)], if i < (1 << tail_bits),
   and another copy of the last two bytes:
     buffer_[-1] == buffer_[(1 << window_bits) - 1] and
     buffer_[-2] == buffer_[(1 << window_bits) - 2]. */
typedef struct RingBuffer {
  /* Size of the ring-buffer is (1 << window_bits) + tail_size_. */
  /* Window size in bytes: 1 << window_bits. */
  const uint32_t size_;
  /* size_ - 1; used to wrap write positions (pos_ & mask_). */
  const uint32_t mask_;
  /* Length of the duplicated prefix region: 1 << tail_bits. */
  const uint32_t tail_size_;
  /* Full allocation payload: size_ + tail_size_. */
  const uint32_t total_size_;

  /* Currently allocated payload size; grows lazily up to total_size_
     (see RingBufferInitBuffer / RingBufferWrite). */
  uint32_t cur_size_;
  /* Position to write in the ring buffer. */
  uint32_t pos_;
  /* The actual ring buffer containing the copy of the last two bytes, the data,
     and the copy of the beginning as a tail. */
  uint8_t* data_;
  /* The start of the ring-buffer (data_ + 2, past the two-byte prefix). */
  uint8_t* buffer_;
} RingBuffer;
48 | \r | |
49 | static BROTLI_INLINE void RingBufferInit(RingBuffer* rb) {\r | |
50 | rb->cur_size_ = 0;\r | |
51 | rb->pos_ = 0;\r | |
52 | rb->data_ = 0;\r | |
53 | rb->buffer_ = 0;\r | |
54 | }\r | |
55 | \r | |
/* Derives the ring-buffer geometry (window size, mask, tail size) from the
   encoder parameters. Must run before the buffer is used; the fields are
   declared const in RingBuffer, so they are written through casts here.
   NOTE(review): modifying a const-qualified member through a non-const
   lvalue is formally undefined behavior (C11 6.7.3); consider dropping the
   const qualifiers from the struct fields instead -- TODO confirm with
   upstream conventions. */
static BROTLI_INLINE void RingBufferSetup(
    const BrotliEncoderParams* params, RingBuffer* rb) {
  int window_bits = ComputeRbBits(params);
  int tail_bits = params->lgblock;
  *(uint32_t*)&rb->size_ = 1u << window_bits;
  *(uint32_t*)&rb->mask_ = (1u << window_bits) - 1;
  *(uint32_t*)&rb->tail_size_ = 1u << tail_bits;
  *(uint32_t*)&rb->total_size_ = rb->size_ + rb->tail_size_;
}
65 | \r | |
/* Releases the ring-buffer storage via the memory manager. Other fields are
   left as-is; call RingBufferInit before reusing |rb|. */
static BROTLI_INLINE void RingBufferFree(MemoryManager* m, RingBuffer* rb) {
  BROTLI_FREE(m, rb->data_);
}
69 | \r | |
70 | /* Allocates or re-allocates data_ to the given length + plus some slack\r | |
71 | region before and after. Fills the slack regions with zeros. */\r | |
72 | static BROTLI_INLINE void RingBufferInitBuffer(\r | |
73 | MemoryManager* m, const uint32_t buflen, RingBuffer* rb) {\r | |
74 | static const size_t kSlackForEightByteHashingEverywhere = 7;\r | |
75 | uint8_t* new_data = BROTLI_ALLOC(\r | |
76 | m, uint8_t, 2 + buflen + kSlackForEightByteHashingEverywhere);\r | |
77 | size_t i;\r | |
78 | if (BROTLI_IS_OOM(m)) return;\r | |
79 | if (rb->data_) {\r | |
80 | memcpy(new_data, rb->data_,\r | |
81 | 2 + rb->cur_size_ + kSlackForEightByteHashingEverywhere);\r | |
82 | BROTLI_FREE(m, rb->data_);\r | |
83 | }\r | |
84 | rb->data_ = new_data;\r | |
85 | rb->cur_size_ = buflen;\r | |
86 | rb->buffer_ = rb->data_ + 2;\r | |
87 | rb->buffer_[-2] = rb->buffer_[-1] = 0;\r | |
88 | for (i = 0; i < kSlackForEightByteHashingEverywhere; ++i) {\r | |
89 | rb->buffer_[rb->cur_size_ + i] = 0;\r | |
90 | }\r | |
91 | }\r | |
92 | \r | |
93 | static BROTLI_INLINE void RingBufferWriteTail(\r | |
dd4f667e | 94 | const uint8_t* bytes, size_t n, RingBuffer* rb) {\r |
11b7501a | 95 | const size_t masked_pos = rb->pos_ & rb->mask_;\r |
dd4f667e | 96 | if (BROTLI_PREDICT_FALSE(masked_pos < rb->tail_size_)) {\r |
11b7501a SB |
97 | /* Just fill the tail buffer with the beginning data. */\r |
98 | const size_t p = rb->size_ + masked_pos;\r | |
99 | memcpy(&rb->buffer_[p], bytes,\r | |
100 | BROTLI_MIN(size_t, n, rb->tail_size_ - masked_pos));\r | |
101 | }\r | |
102 | }\r | |
103 | \r | |
/* Push bytes into the ring buffer.
   Appends |n| bytes at pos_, wrapping modulo size_, keeping the duplicated
   tail region and the two-byte prefix in sync. Allocation is lazy: the full
   buffer is only allocated once a write cannot be served by the small
   first-write buffer. On OOM the buffer is left untouched. */
static BROTLI_INLINE void RingBufferWrite(
    MemoryManager* m, const uint8_t* bytes, size_t n, RingBuffer* rb) {
  if (rb->pos_ == 0 && n < rb->tail_size_) {
    /* Special case for the first write: to process the first block, we don't
       need to allocate the whole ring-buffer and we don't need the tail
       either. However, we do this memory usage optimization only if the
       first write is less than the tail size, which is also the input block
       size, otherwise it is likely that other blocks will follow and we
       will need to reallocate to the full size anyway. */
    rb->pos_ = (uint32_t)n;
    RingBufferInitBuffer(m, rb->pos_, rb);
    if (BROTLI_IS_OOM(m)) return;
    memcpy(rb->buffer_, bytes, n);
    return;
  }
  if (rb->cur_size_ < rb->total_size_) {
    /* Lazily allocate the full buffer. */
    RingBufferInitBuffer(m, rb->total_size_, rb);
    if (BROTLI_IS_OOM(m)) return;
    /* Initialize the last two bytes to zero, so that we don't have to worry
       later when we copy the last two bytes to the first two positions. */
    rb->buffer_[rb->size_ - 2] = 0;
    rb->buffer_[rb->size_ - 1] = 0;
  }
  {
    const size_t masked_pos = rb->pos_ & rb->mask_;
    /* The length of the writes is limited so that we do not need to worry
       about a write wrapping around more than once -- presumably callers
       keep n <= the input block size; TODO confirm at call sites. */
    RingBufferWriteTail(bytes, n, rb);
    if (BROTLI_PREDICT_TRUE(masked_pos + n <= rb->size_)) {
      /* A single write fits. */
      memcpy(&rb->buffer_[masked_pos], bytes, n);
    } else {
      /* Split into two writes.
         Copy into the end of the buffer, including the tail buffer. */
      memcpy(&rb->buffer_[masked_pos], bytes,
          BROTLI_MIN(size_t, n, rb->total_size_ - masked_pos));
      /* Copy into the beginning of the buffer */
      memcpy(&rb->buffer_[0], bytes + (rb->size_ - masked_pos),
          n - (rb->size_ - masked_pos));
    }
  }
  {
    /* Bit 31 of pos_ is a "has wrapped at least once" flag; the low 31 bits
       carry the actual position, so pos_ never aliases 0 after wrapping. */
    BROTLI_BOOL not_first_lap = (rb->pos_ & (1u << 31)) != 0;
    uint32_t rb_pos_mask = (1u << 31) - 1;
    /* Refresh the two-byte prefix copy of the window's last two bytes. */
    rb->buffer_[-2] = rb->buffer_[rb->size_ - 2];
    rb->buffer_[-1] = rb->buffer_[rb->size_ - 1];
    rb->pos_ = (rb->pos_ & rb_pos_mask) + (uint32_t)(n & rb_pos_mask);
    if (not_first_lap) {
      /* Wrap, but preserve not-a-first-lap feature. */
      rb->pos_ |= 1u << 31;
    }
  }
}
159 | \r | |
160 | #if defined(__cplusplus) || defined(c_plusplus)\r | |
161 | } /* extern "C" */\r | |
162 | #endif\r | |
163 | \r | |
164 | #endif /* BROTLI_ENC_RINGBUFFER_H_ */\r |