/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
 * Copyright (c) 2019-2020 Samuel Neves and Jack O'Connor
 * Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
 */

#include <sys/zfs_context.h>
#include <sys/blake3.h>

#include "blake3_impl.h"

/*
 * blake3_compress_subtree_wide() needs a 1056-byte stack frame;
 * this pragma keeps gcc from warning about the frame size.
 */
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wframe-larger-than="
#endif

/* used internally */
typedef struct {
	uint32_t input_cv[8];
	uint64_t counter;
	uint8_t block[BLAKE3_BLOCK_LEN];
	uint8_t block_len;
	uint8_t flags;
} output_t;

/* internal flags */
enum blake3_flags {
	CHUNK_START = 1 << 0,
	CHUNK_END = 1 << 1,
	PARENT = 1 << 2,
	ROOT = 1 << 3,
	KEYED_HASH = 1 << 4,
	DERIVE_KEY_CONTEXT = 1 << 5,
	DERIVE_KEY_MATERIAL = 1 << 6,
};

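/*
 * Illustrative example (not part of the implementation): for a keyed
 * hash of a message no longer than one block, the single compression
 * sees every role at once:
 *
 *	flags = KEYED_HASH | CHUNK_START | CHUNK_END | ROOT;
 *
 * i.e. that block is the first and last block of its chunk, and the
 * chunk is the root of the tree.
 */
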
/* internal start */
static void chunk_state_init(blake3_chunk_state_t *ctx,
    const uint32_t key[8], uint8_t flags)
{
	memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
	ctx->chunk_counter = 0;
	memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
	ctx->buf_len = 0;
	ctx->blocks_compressed = 0;
	ctx->flags = flags;
}

static void chunk_state_reset(blake3_chunk_state_t *ctx,
    const uint32_t key[8], uint64_t chunk_counter)
{
	memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
	ctx->chunk_counter = chunk_counter;
	ctx->blocks_compressed = 0;
	memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
	ctx->buf_len = 0;
}

static size_t chunk_state_len(const blake3_chunk_state_t *ctx)
{
	return (BLAKE3_BLOCK_LEN * (size_t)ctx->blocks_compressed) +
	    ((size_t)ctx->buf_len);
}

static size_t chunk_state_fill_buf(blake3_chunk_state_t *ctx,
    const uint8_t *input, size_t input_len)
{
	size_t take = BLAKE3_BLOCK_LEN - ((size_t)ctx->buf_len);
	if (take > input_len) {
		take = input_len;
	}
	uint8_t *dest = ctx->buf + ((size_t)ctx->buf_len);
	memcpy(dest, input, take);
	ctx->buf_len += (uint8_t)take;
	return (take);
}

static uint8_t chunk_state_maybe_start_flag(const blake3_chunk_state_t *ctx)
{
	if (ctx->blocks_compressed == 0) {
		return (CHUNK_START);
	} else {
		return (0);
	}
}

static output_t make_output(const uint32_t input_cv[8],
    const uint8_t *block, uint8_t block_len,
    uint64_t counter, uint8_t flags)
{
	output_t ret;
	memcpy(ret.input_cv, input_cv, 32);
	memcpy(ret.block, block, BLAKE3_BLOCK_LEN);
	ret.block_len = block_len;
	ret.counter = counter;
	ret.flags = flags;
	return (ret);
}

/*
 * Chaining values within a given chunk (specifically the compress_in_place
 * interface) are represented as words. This avoids unnecessary bytes<->words
 * conversion overhead in the portable implementation. However, the hash_many
 * interface handles both user input and parent node blocks, so it accepts
 * bytes. For that reason, chaining values in the CV stack are represented as
 * bytes.
 */
static void output_chaining_value(const blake3_ops_t *ops,
    const output_t *ctx, uint8_t cv[32])
{
	uint32_t cv_words[8];
	memcpy(cv_words, ctx->input_cv, 32);
	ops->compress_in_place(cv_words, ctx->block, ctx->block_len,
	    ctx->counter, ctx->flags);
	store_cv_words(cv, cv_words);
}

static void output_root_bytes(const blake3_ops_t *ops, const output_t *ctx,
    uint64_t seek, uint8_t *out, size_t out_len)
{
	uint64_t output_block_counter = seek / 64;
	size_t offset_within_block = seek % 64;
	uint8_t wide_buf[64];
	while (out_len > 0) {
		ops->compress_xof(ctx->input_cv, ctx->block, ctx->block_len,
		    output_block_counter, ctx->flags | ROOT, wide_buf);
		size_t available_bytes = 64 - offset_within_block;
		size_t memcpy_len;
		if (out_len > available_bytes) {
			memcpy_len = available_bytes;
		} else {
			memcpy_len = out_len;
		}
		memcpy(out, wide_buf + offset_within_block, memcpy_len);
		out += memcpy_len;
		out_len -= memcpy_len;
		output_block_counter += 1;
		offset_within_block = 0;
	}
}

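/*
 * Worked example for the seek arithmetic above (illustrative only):
 * with seek = 100 and out_len = 40, output_block_counter starts at
 * 100 / 64 = 1 and offset_within_block at 100 % 64 = 36. The first
 * XOF block supplies 64 - 36 = 28 bytes, then the loop continues at
 * block 2, offset 0, and copies the remaining 12 bytes.
 */
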
static void chunk_state_update(const blake3_ops_t *ops,
    blake3_chunk_state_t *ctx, const uint8_t *input, size_t input_len)
{
	if (ctx->buf_len > 0) {
		size_t take = chunk_state_fill_buf(ctx, input, input_len);
		input += take;
		input_len -= take;
		if (input_len > 0) {
			ops->compress_in_place(ctx->cv, ctx->buf,
			    BLAKE3_BLOCK_LEN, ctx->chunk_counter,
			    ctx->flags|chunk_state_maybe_start_flag(ctx));
			ctx->blocks_compressed += 1;
			ctx->buf_len = 0;
			memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
		}
	}

	while (input_len > BLAKE3_BLOCK_LEN) {
		ops->compress_in_place(ctx->cv, input, BLAKE3_BLOCK_LEN,
		    ctx->chunk_counter,
		    ctx->flags|chunk_state_maybe_start_flag(ctx));
		ctx->blocks_compressed += 1;
		input += BLAKE3_BLOCK_LEN;
		input_len -= BLAKE3_BLOCK_LEN;
	}

	size_t take = chunk_state_fill_buf(ctx, input, input_len);
	input += take;
	input_len -= take;
}

static output_t chunk_state_output(const blake3_chunk_state_t *ctx)
{
	uint8_t block_flags =
	    ctx->flags | chunk_state_maybe_start_flag(ctx) | CHUNK_END;
	return (make_output(ctx->cv, ctx->buf, ctx->buf_len, ctx->chunk_counter,
	    block_flags));
}

static output_t parent_output(const uint8_t block[BLAKE3_BLOCK_LEN],
    const uint32_t key[8], uint8_t flags)
{
	return (make_output(key, block, BLAKE3_BLOCK_LEN, 0, flags | PARENT));
}

/*
 * Given some input larger than one chunk, return the number of bytes that
 * should go in the left subtree. This is the largest power-of-2 number of
 * chunks that leaves at least 1 byte for the right subtree.
 */
static size_t left_len(size_t content_len)
{
	/*
	 * Subtract 1 to reserve at least one byte for the right side.
	 * content_len should always be greater than BLAKE3_CHUNK_LEN.
	 */
	size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
	return (round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN);
}

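/*
 * Worked example (illustrative only): with BLAKE3_CHUNK_LEN = 1024 and
 * content_len = 3073 (three full chunks plus one byte), full_chunks is
 * 3072 / 1024 = 3, which rounds down to the power of 2 below it, 2, so
 * the left subtree takes 2048 bytes and the right subtree gets the
 * remaining 1025.
 */
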
/*
 * Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
 * on a single thread. Write out the chunk chaining values and return the
 * number of chunks hashed. These chunks are never the root and never empty;
 * those cases use a different codepath.
 */
static size_t compress_chunks_parallel(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
	const uint8_t *chunks_array[MAX_SIMD_DEGREE];
	size_t input_position = 0;
	size_t chunks_array_len = 0;
	while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
		chunks_array[chunks_array_len] = &input[input_position];
		input_position += BLAKE3_CHUNK_LEN;
		chunks_array_len += 1;
	}

	ops->hash_many(chunks_array, chunks_array_len, BLAKE3_CHUNK_LEN /
	    BLAKE3_BLOCK_LEN, key, chunk_counter, B_TRUE, flags, CHUNK_START,
	    CHUNK_END, out);

	/*
	 * Hash the remaining partial chunk, if there is one. Note that the
	 * empty chunk (meaning the empty message) is a different codepath.
	 */
	if (input_len > input_position) {
		uint64_t counter = chunk_counter + (uint64_t)chunks_array_len;
		blake3_chunk_state_t chunk_state;
		chunk_state_init(&chunk_state, key, flags);
		chunk_state.chunk_counter = counter;
		chunk_state_update(ops, &chunk_state, &input[input_position],
		    input_len - input_position);
		output_t output = chunk_state_output(&chunk_state);
		output_chaining_value(ops, &output, &out[chunks_array_len *
		    BLAKE3_OUT_LEN]);
		return (chunks_array_len + 1);
	} else {
		return (chunks_array_len);
	}
}

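/*
 * Worked example (illustrative only): given 5 full chunks plus a
 * trailing partial chunk (with a SIMD degree large enough to allow it),
 * hash_many() processes the 5 complete chunks in one call, the partial
 * chunk goes through the chunk_state path, and the function writes 6
 * chaining values and returns 6.
 */
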
/*
 * Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
 * on a single thread. Write out the parent chaining values and return the
 * number of parents hashed. (If there's an odd input chaining value left over,
 * return it as an additional output.) These parents are never the root and
 * never empty; those cases use a different codepath.
 */
static size_t compress_parents_parallel(const blake3_ops_t *ops,
    const uint8_t *child_chaining_values, size_t num_chaining_values,
    const uint32_t key[8], uint8_t flags, uint8_t *out)
{
	const uint8_t *parents_array[MAX_SIMD_DEGREE_OR_2];
	size_t parents_array_len = 0;

	while (num_chaining_values - (2 * parents_array_len) >= 2) {
		parents_array[parents_array_len] = &child_chaining_values[2 *
		    parents_array_len * BLAKE3_OUT_LEN];
		parents_array_len += 1;
	}

	ops->hash_many(parents_array, parents_array_len, 1, key, 0, B_FALSE,
	    flags | PARENT, 0, 0, out);

	/* If there's an odd child left over, it becomes an output. */
	if (num_chaining_values > 2 * parents_array_len) {
		memcpy(&out[parents_array_len * BLAKE3_OUT_LEN],
		    &child_chaining_values[2 * parents_array_len *
		    BLAKE3_OUT_LEN], BLAKE3_OUT_LEN);
		return (parents_array_len + 1);
	} else {
		return (parents_array_len);
	}
}

/*
 * The wide helper function writes out an array of chaining values and
 * returns the length of that array. The number of chaining values returned
 * is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
 * if the input is shorter than that many chunks. The reason for maintaining a
 * wide array of chaining values going back up the tree is to allow the
 * implementation to hash as many parents in parallel as possible.
 *
 * As a special case when the SIMD degree is 1, this function will still return
 * at least 2 outputs. This guarantees that this function doesn't perform the
 * root compression. (If it did, it would use the wrong flags, and also we
 * wouldn't be able to implement extendable output.) Note that this function is
 * not used when the whole input is only 1 chunk long; that's a different
 * codepath.
 *
 * Why not just have the caller split the input on the first update(), instead
 * of implementing this special rule? Because we don't want to limit SIMD or
 * multi-threading parallelism for that update().
 */
static size_t blake3_compress_subtree_wide(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
	/*
	 * Note that the single chunk case does *not* bump the SIMD degree up
	 * to 2 when it is 1. If this implementation adds multi-threading in
	 * the future, this gives us the option of multi-threading even the
	 * 2-chunk case, which can help performance on smaller platforms.
	 */
	if (input_len <= (size_t)(ops->degree * BLAKE3_CHUNK_LEN)) {
		return (compress_chunks_parallel(ops, input, input_len, key,
		    chunk_counter, flags, out));
	}

	/*
	 * With more than simd_degree chunks, we need to recurse. Start by
	 * dividing the input into left and right subtrees. (Note that this is
	 * only optimal as long as the SIMD degree is a power of 2. If we ever
	 * get a SIMD degree of 3 or something, we'll need a more complicated
	 * strategy.)
	 */
	size_t left_input_len = left_len(input_len);
	size_t right_input_len = input_len - left_input_len;
	const uint8_t *right_input = &input[left_input_len];
	uint64_t right_chunk_counter = chunk_counter +
	    (uint64_t)(left_input_len / BLAKE3_CHUNK_LEN);

	/*
	 * Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2
	 * to account for the special case of returning 2 outputs when the
	 * SIMD degree is 1.
	 */
	uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
	size_t degree = ops->degree;
	if (left_input_len > BLAKE3_CHUNK_LEN && degree == 1) {
		/*
		 * The special case: We always use a degree of at least two,
		 * to make sure there are two outputs. Except, as noted above,
		 * at the chunk level, where we allow degree=1. (Note that the
		 * 1-chunk-input case is a different codepath.)
		 */
		degree = 2;
	}
	uint8_t *right_cvs = &cv_array[degree * BLAKE3_OUT_LEN];

	/*
	 * Recurse! If this implementation adds multi-threading support in the
	 * future, this is where it will go.
	 */
	size_t left_n = blake3_compress_subtree_wide(ops, input, left_input_len,
	    key, chunk_counter, flags, cv_array);
	size_t right_n = blake3_compress_subtree_wide(ops, right_input,
	    right_input_len, key, right_chunk_counter, flags, right_cvs);

	/*
	 * The special case again. If simd_degree=1, then we'll have left_n=1
	 * and right_n=1. Rather than compressing them into a single output,
	 * return them directly, to make sure we always have at least two
	 * outputs.
	 */
	if (left_n == 1) {
		memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
		return (2);
	}

	/* Otherwise, do one layer of parent node compression. */
	size_t num_chaining_values = left_n + right_n;
	return (compress_parents_parallel(ops, cv_array,
	    num_chaining_values, key, flags, out));
}

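/*
 * Worked example (illustrative only): with ops->degree = 4, a 10-chunk
 * input splits into an 8-chunk left subtree and a 2-chunk right
 * subtree. The left recursion yields 4 chaining values (two 4-chunk
 * leaf calls condensed by one parent layer), the right yields 2, and
 * the final compress_parents_parallel() pass reduces those 6 CVs to
 * the 3 that are written to out.
 */
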
/*
 * Hash a subtree with compress_subtree_wide(), and then condense the resulting
 * list of chaining values down to a single parent node. Don't compress that
 * last parent node, however. Instead, return its message bytes (the
 * concatenated chaining values of its children). This is necessary when the
 * first call to update() supplies a complete subtree, because the topmost
 * parent node of that subtree could end up being the root. It's also necessary
 * for extended output in the general case.
 *
 * As with compress_subtree_wide(), this function is not used on inputs of 1
 * chunk or less. That's a different codepath.
 */
static void compress_subtree_to_parent_node(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t out[2 * BLAKE3_OUT_LEN])
{
	uint8_t cv_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
	size_t num_cvs = blake3_compress_subtree_wide(ops, input, input_len,
	    key, chunk_counter, flags, cv_array);

	/*
	 * If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
	 * compress_subtree_wide() returns more than 2 chaining values. Condense
	 * them into 2 by forming parent nodes repeatedly.
	 */
	uint8_t out_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN / 2];
	while (num_cvs > 2) {
		num_cvs = compress_parents_parallel(ops, cv_array, num_cvs, key,
		    flags, out_array);
		memcpy(cv_array, out_array, num_cvs * BLAKE3_OUT_LEN);
	}
	memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
}

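/*
 * Worked example (illustrative only): with a SIMD degree of 8, a large
 * subtree can come back from blake3_compress_subtree_wide() as 8
 * chaining values; the loop above condenses them 8 -> 4 -> 2, and the
 * final two CVs (the message bytes of the topmost parent node) are
 * copied to out.
 */
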
static void hasher_init_base(BLAKE3_CTX *ctx, const uint32_t key[8],
    uint8_t flags)
{
	memcpy(ctx->key, key, BLAKE3_KEY_LEN);
	chunk_state_init(&ctx->chunk, key, flags);
	ctx->cv_stack_len = 0;
	ctx->ops = blake3_impl_get_ops();
}

/*
 * As described in hasher_push_cv() below, we do "lazy merging", delaying
 * merges until right before the next CV is about to be added. This is
 * different from the reference implementation. Another difference is that we
 * aren't always merging 1 chunk at a time. Instead, each CV might represent
 * any power-of-two number of chunks, as long as the smaller-above-larger
 * stack order is maintained. Instead of the "count the trailing 0-bits"
 * algorithm described in the spec, we use a "count the total number of
 * 1-bits" variant that doesn't require us to retain the subtree size of the
 * CV on top of the stack. The principle is the same: each CV that should
 * remain in the stack is represented by a 1-bit in the total number of chunks
 * (or bytes) so far.
 */
static void hasher_merge_cv_stack(BLAKE3_CTX *ctx, uint64_t total_len)
{
	size_t post_merge_stack_len = (size_t)popcnt(total_len);
	while (ctx->cv_stack_len > post_merge_stack_len) {
		uint8_t *parent_node =
		    &ctx->cv_stack[(ctx->cv_stack_len - 2) * BLAKE3_OUT_LEN];
		output_t output =
		    parent_output(parent_node, ctx->key, ctx->chunk.flags);
		output_chaining_value(ctx->ops, &output, parent_node);
		ctx->cv_stack_len -= 1;
	}
}

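/*
 * Worked example (illustrative only): after 6 total chunks,
 * popcnt(6) == 2 (binary 110), so the loop above merges the stack down
 * to two CVs: one covering a 4-chunk subtree and one covering a
 * 2-chunk subtree.
 */
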
/*
 * In reference_impl.rs, we merge the new CV with existing CVs from the stack
 * before pushing it. We can do that because we know more input is coming, so
 * we know none of the merges are root.
 *
 * This setting is different. We want to feed as much input as possible to
 * compress_subtree_wide(), without setting aside anything for the chunk_state.
 * If the user gives us 64 KiB, we want to parallelize over all 64 KiB at once
 * as a single subtree, if at all possible.
 *
 * This leads to two problems:
 * 1) This 64 KiB input might be the only call that ever gets made to update.
 *    In this case, the root node of the 64 KiB subtree would be the root node
 *    of the whole tree, and it would need to be ROOT finalized. We can't
 *    compress it until we know.
 * 2) This 64 KiB input might complete a larger tree, whose root node is
 *    similarly going to be the root of the whole tree. For example, maybe
 *    we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress the
 *    node at the root of the 256 KiB subtree until we know how to finalize it.
 *
 * The second problem is solved with "lazy merging". That is, when we're about
 * to add a CV to the stack, we don't merge it with anything first, as the
 * reference impl does. Instead we do merges using the *previous* CV that was
 * added, which is sitting on top of the stack, and we put the new CV
 * (unmerged) on top of the stack afterwards. This guarantees that we never
 * merge the root node until finalize().
 *
 * Solving the first problem requires an additional tool,
 * compress_subtree_to_parent_node(). That function always returns the top
 * *two* chaining values of the subtree it's compressing. We then do lazy
 * merging with each of them separately, so that the second CV will always
 * remain unmerged. (That also helps us support extendable output when we're
 * hashing an input all-at-once.)
 */
static void hasher_push_cv(BLAKE3_CTX *ctx, uint8_t new_cv[BLAKE3_OUT_LEN],
    uint64_t chunk_counter)
{
	hasher_merge_cv_stack(ctx, chunk_counter);
	memcpy(&ctx->cv_stack[ctx->cv_stack_len * BLAKE3_OUT_LEN], new_cv,
	    BLAKE3_OUT_LEN);
	ctx->cv_stack_len += 1;
}

void
Blake3_Init(BLAKE3_CTX *ctx)
{
	hasher_init_base(ctx, BLAKE3_IV, 0);
}

void
Blake3_InitKeyed(BLAKE3_CTX *ctx, const uint8_t key[BLAKE3_KEY_LEN])
{
	uint32_t key_words[8];
	load_key_words(key, key_words);
	hasher_init_base(ctx, key_words, KEYED_HASH);
}

static void
Blake3_Update2(BLAKE3_CTX *ctx, const void *input, size_t input_len)
{
	/*
	 * Explicitly checking for zero avoids causing UB by passing a null
	 * pointer to memcpy. This comes up in practice with things like:
	 *   std::vector<uint8_t> v;
	 *   blake3_hasher_update(&hasher, v.data(), v.size());
	 */
	if (input_len == 0) {
		return;
	}

	const uint8_t *input_bytes = (const uint8_t *)input;

	/*
	 * If we have some partial chunk bytes in the internal chunk_state, we
	 * need to finish that chunk first.
	 */
	if (chunk_state_len(&ctx->chunk) > 0) {
		size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&ctx->chunk);
		if (take > input_len) {
			take = input_len;
		}
		chunk_state_update(ctx->ops, &ctx->chunk, input_bytes, take);
		input_bytes += take;
		input_len -= take;
		/*
		 * If we've filled the current chunk and there's more coming,
		 * finalize this chunk and proceed. In this case we know it's
		 * not the root.
		 */
		if (input_len > 0) {
			output_t output = chunk_state_output(&ctx->chunk);
			uint8_t chunk_cv[32];
			output_chaining_value(ctx->ops, &output, chunk_cv);
			hasher_push_cv(ctx, chunk_cv, ctx->chunk.chunk_counter);
			chunk_state_reset(&ctx->chunk, ctx->key,
			    ctx->chunk.chunk_counter + 1);
		} else {
			return;
		}
	}

	/*
	 * Now the chunk_state is clear, and we have more input. If there's
	 * more than a single chunk (so, definitely not the root chunk), hash
	 * the largest whole subtree we can, with the full benefits of SIMD
	 * (and maybe in the future, multi-threading) parallelism. Two
	 * restrictions:
	 * - The subtree has to be a power-of-2 number of chunks. Only
	 *   subtrees along the right edge can be incomplete, and we don't know
	 *   where the right edge is going to be until we get to finalize().
	 * - The subtree must evenly divide the total number of chunks up
	 *   until this point (if total is not 0). If the current incomplete
	 *   subtree is only waiting for 1 more chunk, we can't hash a subtree
	 *   of 4 chunks. We have to complete the current subtree first.
	 * Because we might need to break up the input to form powers of 2, or
	 * to evenly divide what we already have, this part runs in a loop.
	 */
	while (input_len > BLAKE3_CHUNK_LEN) {
		size_t subtree_len = round_down_to_power_of_2(input_len);
		uint64_t count_so_far =
		    ctx->chunk.chunk_counter * BLAKE3_CHUNK_LEN;
		/*
		 * Shrink the subtree_len until it evenly divides the count so
		 * far. We know that subtree_len itself is a power of 2, so we
		 * can use a bitmasking trick instead of an actual remainder
		 * operation. (Note that if the caller consistently passes
		 * power-of-2 inputs of the same size, as is hopefully
		 * typical, this loop condition will always fail, and
		 * subtree_len will always be the full length of the input.)
		 *
		 * An aside: We don't have to shrink subtree_len quite this
		 * much. For example, if count_so_far is 1, we could pass 2
		 * chunks to compress_subtree_to_parent_node. Since we'll get
		 * 2 CVs back, we'll still get the right answer in the end,
		 * and we might get to use 2-way SIMD parallelism. The problem
		 * with this optimization is that it gets us stuck always
		 * hashing 2 chunks. The total number of chunks will remain
		 * odd, and we'll never graduate to higher degrees of
		 * parallelism. See
		 * https://github.com/BLAKE3-team/BLAKE3/issues/69.
		 */
		while ((((uint64_t)(subtree_len - 1)) & count_so_far) != 0) {
			subtree_len /= 2;
		}
		/*
		 * The shrunken subtree_len might now be 1 chunk long. If so,
		 * hash that one chunk by itself. Otherwise, compress the
		 * subtree into a pair of CVs.
		 */
		uint64_t subtree_chunks = subtree_len / BLAKE3_CHUNK_LEN;
		if (subtree_len <= BLAKE3_CHUNK_LEN) {
			blake3_chunk_state_t chunk_state;
			chunk_state_init(&chunk_state, ctx->key,
			    ctx->chunk.flags);
			chunk_state.chunk_counter = ctx->chunk.chunk_counter;
			chunk_state_update(ctx->ops, &chunk_state, input_bytes,
			    subtree_len);
			output_t output = chunk_state_output(&chunk_state);
			uint8_t cv[BLAKE3_OUT_LEN];
			output_chaining_value(ctx->ops, &output, cv);
			hasher_push_cv(ctx, cv, chunk_state.chunk_counter);
		} else {
			/*
			 * This is the high-performance happy path, though
			 * getting here depends on the caller giving us a long
			 * enough input.
			 */
			uint8_t cv_pair[2 * BLAKE3_OUT_LEN];
			compress_subtree_to_parent_node(ctx->ops, input_bytes,
			    subtree_len, ctx->key, ctx->chunk.chunk_counter,
			    ctx->chunk.flags, cv_pair);
			hasher_push_cv(ctx, cv_pair, ctx->chunk.chunk_counter);
			hasher_push_cv(ctx, &cv_pair[BLAKE3_OUT_LEN],
			    ctx->chunk.chunk_counter + (subtree_chunks / 2));
		}
		ctx->chunk.chunk_counter += subtree_chunks;
		input_bytes += subtree_len;
		input_len -= subtree_len;
	}

645
646 /*
647 * If there's any remaining input less than a full chunk, add it to
648 * the chunk state. In that case, also do a final merge loop to make
649 * sure the subtree stack doesn't contain any unmerged pairs. The
650 * remaining input means we know these merges are non-root. This merge
651 * loop isn't strictly necessary here, because hasher_push_chunk_cv
652 * already does its own merge loop, but it simplifies
653 * blake3_hasher_finalize below.
654 */
655 if (input_len > 0) {
656 chunk_state_update(ctx->ops, &ctx->chunk, input_bytes,
657 input_len);
658 hasher_merge_cv_stack(ctx, ctx->chunk.chunk_counter);
659 }
660}
661
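/*
 * Worked example for the subtree-shrinking rule in Blake3_Update2()
 * (illustrative only): if 3 chunks have been hashed so far
 * (count_so_far = 3 * 1024) and 8 more chunks arrive, subtree_len
 * starts at 8192, but (8192 - 1) & 3072 is nonzero, and it keeps
 * halving until subtree_len = 1024: only a single chunk evenly divides
 * a count of 3 chunks, so the odd chunk is hashed alone before larger
 * subtrees can form again.
 */
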
void
Blake3_Update(BLAKE3_CTX *ctx, const void *input, size_t todo)
{
	size_t done = 0;
	const uint8_t *data = input;
	const size_t block_max = 1024 * 64;

	/* feed input in blocks of at most 64 KiB to keep stack usage small */
	while (todo != 0) {
		size_t block = (todo >= block_max) ? block_max : todo;
		Blake3_Update2(ctx, data + done, block);
		done += block;
		todo -= block;
	}
}

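/*
 * Sketch of typical use of this API (illustrative only, not part of
 * this file; the buffer names are hypothetical):
 *
 *	BLAKE3_CTX ctx;
 *	uint8_t digest[BLAKE3_OUT_LEN];
 *
 *	Blake3_Init(&ctx);
 *	Blake3_Update(&ctx, buf, buflen);
 *	Blake3_Final(&ctx, digest);
 */
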
void
Blake3_Final(const BLAKE3_CTX *ctx, uint8_t *out)
{
	Blake3_FinalSeek(ctx, 0, out, BLAKE3_OUT_LEN);
}

void
Blake3_FinalSeek(const BLAKE3_CTX *ctx, uint64_t seek, uint8_t *out,
    size_t out_len)
{
	/*
	 * Explicitly checking for zero avoids causing UB by passing a null
	 * pointer to memcpy. This comes up in practice with things like:
	 *   std::vector<uint8_t> v;
	 *   blake3_hasher_finalize(&hasher, v.data(), v.size());
	 */
	if (out_len == 0) {
		return;
	}
	/* If the subtree stack is empty, then the current chunk is the root. */
	if (ctx->cv_stack_len == 0) {
		output_t output = chunk_state_output(&ctx->chunk);
		output_root_bytes(ctx->ops, &output, seek, out, out_len);
		return;
	}
	/*
	 * If there are any bytes in the chunk state, finalize that chunk and
	 * do a roll-up merge between that chunk hash and every subtree in the
	 * stack. In this case, the extra merge loop at the end of
	 * blake3_hasher_update guarantees that none of the subtrees in the
	 * stack need to be merged with each other first. Otherwise, if there
	 * are no bytes in the chunk state, then the top of the stack is a
	 * chunk hash, and we start the merge from that.
	 */
	output_t output;
	size_t cvs_remaining;
	if (chunk_state_len(&ctx->chunk) > 0) {
		cvs_remaining = ctx->cv_stack_len;
		output = chunk_state_output(&ctx->chunk);
	} else {
		/* There are always at least 2 CVs in the stack in this case. */
		cvs_remaining = ctx->cv_stack_len - 2;
		output = parent_output(&ctx->cv_stack[cvs_remaining * 32],
		    ctx->key, ctx->chunk.flags);
	}
	while (cvs_remaining > 0) {
		cvs_remaining -= 1;
		uint8_t parent_block[BLAKE3_BLOCK_LEN];
		memcpy(parent_block, &ctx->cv_stack[cvs_remaining * 32], 32);
		output_chaining_value(ctx->ops, &output, &parent_block[32]);
		output = parent_output(parent_block, ctx->key,
		    ctx->chunk.flags);
	}
	output_root_bytes(ctx->ops, &output, seek, out, out_len);
}
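
/*
 * Illustrative example (not part of this file): since finalization
 * takes the context as const and does not modify it, extended output
 * can be produced by repeated seeks into the XOF stream, e.g. bytes
 * 0..63 and then 64..127:
 *
 *	uint8_t block0[64], block1[64];
 *	Blake3_FinalSeek(&ctx, 0, block0, sizeof (block0));
 *	Blake3_FinalSeek(&ctx, 64, block1, sizeof (block1));
 */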