blake3.c
/*===-- blake3.c - BLAKE3 C Implementation ------------------------*- C -*-===*\
|*                                                                            *|
|* Released into the public domain with CC0 1.0                               *|
|* See 'llvm/lib/Support/BLAKE3/LICENSE' for info.                            *|
|* SPDX-License-Identifier: CC0-1.0                                           *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "blake3_impl.h"

const char *llvm_blake3_version(void) { return BLAKE3_VERSION_STRING; }

INLINE void chunk_state_init(blake3_chunk_state *self, const uint32_t key[8],
                             uint8_t flags) {
  memcpy(self->cv, key, BLAKE3_KEY_LEN);
  self->chunk_counter = 0;
  memset(self->buf, 0, BLAKE3_BLOCK_LEN);
  self->buf_len = 0;
  self->blocks_compressed = 0;
  self->flags = flags;
}

INLINE void chunk_state_reset(blake3_chunk_state *self, const uint32_t key[8],
                              uint64_t chunk_counter) {
  memcpy(self->cv, key, BLAKE3_KEY_LEN);
  self->chunk_counter = chunk_counter;
  self->blocks_compressed = 0;
  memset(self->buf, 0, BLAKE3_BLOCK_LEN);
  self->buf_len = 0;
}

INLINE size_t chunk_state_len(const blake3_chunk_state *self) {
  return (BLAKE3_BLOCK_LEN * (size_t)self->blocks_compressed) +
         ((size_t)self->buf_len);
}

INLINE size_t chunk_state_fill_buf(blake3_chunk_state *self,
                                   const uint8_t *input, size_t input_len) {
  size_t take = BLAKE3_BLOCK_LEN - ((size_t)self->buf_len);
  if (take > input_len) {
    take = input_len;
  }
  uint8_t *dest = self->buf + ((size_t)self->buf_len);
  memcpy(dest, input, take);
  self->buf_len += (uint8_t)take;
  return take;
}

INLINE uint8_t chunk_state_maybe_start_flag(const blake3_chunk_state *self) {
  if (self->blocks_compressed == 0) {
    return CHUNK_START;
  } else {
    return 0;
  }
}

typedef struct {
  uint32_t input_cv[8];
  uint64_t counter;
  uint8_t block[BLAKE3_BLOCK_LEN];
  uint8_t block_len;
  uint8_t flags;
} output_t;

INLINE output_t make_output(const uint32_t input_cv[8],
                            const uint8_t block[BLAKE3_BLOCK_LEN],
                            uint8_t block_len, uint64_t counter,
                            uint8_t flags) {
  output_t ret;
  memcpy(ret.input_cv, input_cv, 32);
  memcpy(ret.block, block, BLAKE3_BLOCK_LEN);
  ret.block_len = block_len;
  ret.counter = counter;
  ret.flags = flags;
  return ret;
}

// Chaining values within a given chunk (specifically the compress_in_place
// interface) are represented as words. This avoids unnecessary bytes<->words
// conversion overhead in the portable implementation. However, the hash_many
// interface handles both user input and parent node blocks, so it accepts
// bytes. For that reason, chaining values in the CV stack are represented as
// bytes.
INLINE void output_chaining_value(const output_t *self, uint8_t cv[32]) {
  uint32_t cv_words[8];
  memcpy(cv_words, self->input_cv, 32);
  blake3_compress_in_place(cv_words, self->block, self->block_len,
                           self->counter, self->flags);
  store_cv_words(cv, cv_words);
}

INLINE void output_root_bytes(const output_t *self, uint64_t seek, uint8_t *out,
                              size_t out_len) {
  uint64_t output_block_counter = seek / 64;
  size_t offset_within_block = seek % 64;
  uint8_t wide_buf[64];
  while (out_len > 0) {
    blake3_compress_xof(self->input_cv, self->block, self->block_len,
                        output_block_counter, self->flags | ROOT, wide_buf);
    size_t available_bytes = 64 - offset_within_block;
    size_t memcpy_len;
    if (out_len > available_bytes) {
      memcpy_len = available_bytes;
    } else {
      memcpy_len = out_len;
    }
    memcpy(out, wide_buf + offset_within_block, memcpy_len);
    out += memcpy_len;
    out_len -= memcpy_len;
    output_block_counter += 1;
    offset_within_block = 0;
  }
}
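
// Illustration of the seek arithmetic above (not in the upstream file): the
// XOF output is a virtual byte stream produced in 64-byte blocks. A seek of
// 100 starts at output_block_counter = 100 / 64 = 1 and offset_within_block =
// 100 % 64 = 36, so the first blake3_compress_xof call contributes at most
// 64 - 36 = 28 bytes before the loop realigns to block boundaries.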

INLINE void chunk_state_update(blake3_chunk_state *self, const uint8_t *input,
                               size_t input_len) {
  if (self->buf_len > 0) {
    size_t take = chunk_state_fill_buf(self, input, input_len);
    input += take;
    input_len -= take;
    if (input_len > 0) {
      blake3_compress_in_place(
          self->cv, self->buf, BLAKE3_BLOCK_LEN, self->chunk_counter,
          self->flags | chunk_state_maybe_start_flag(self));
      self->blocks_compressed += 1;
      self->buf_len = 0;
      memset(self->buf, 0, BLAKE3_BLOCK_LEN);
    }
  }

  while (input_len > BLAKE3_BLOCK_LEN) {
    blake3_compress_in_place(self->cv, input, BLAKE3_BLOCK_LEN,
                             self->chunk_counter,
                             self->flags | chunk_state_maybe_start_flag(self));
    self->blocks_compressed += 1;
    input += BLAKE3_BLOCK_LEN;
    input_len -= BLAKE3_BLOCK_LEN;
  }

  size_t take = chunk_state_fill_buf(self, input, input_len);
  input += take;
  input_len -= take;
}

INLINE output_t chunk_state_output(const blake3_chunk_state *self) {
  uint8_t block_flags =
      self->flags | chunk_state_maybe_start_flag(self) | CHUNK_END;
  return make_output(self->cv, self->buf, self->buf_len, self->chunk_counter,
                     block_flags);
}

INLINE output_t parent_output(const uint8_t block[BLAKE3_BLOCK_LEN],
                              const uint32_t key[8], uint8_t flags) {
  return make_output(key, block, BLAKE3_BLOCK_LEN, 0, flags | PARENT);
}

// Given some input larger than one chunk, return the number of bytes that
// should go in the left subtree. This is the largest power-of-2 number of
// chunks that leaves at least 1 byte for the right subtree.
INLINE size_t left_len(size_t content_len) {
  // Subtract 1 to reserve at least one byte for the right side. content_len
  // should always be greater than BLAKE3_CHUNK_LEN.
  size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
  return round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN;
}
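
// Worked example (not in the upstream file): with BLAKE3_CHUNK_LEN = 1024, an
// input of 3 * 1024 + 1 bytes has full_chunks = (3073 - 1) / 1024 = 3, which
// rounds down to the power of 2 below it, 2. The left subtree therefore gets
// 2048 bytes and the right subtree gets the remaining 1025.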

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
// on a single thread. Write out the chunk chaining values and return the
// number of chunks hashed. These chunks are never the root and never empty;
// those cases use a different codepath.
INLINE size_t compress_chunks_parallel(const uint8_t *input, size_t input_len,
                                       const uint32_t key[8],
                                       uint64_t chunk_counter, uint8_t flags,
                                       uint8_t *out) {
#if defined(BLAKE3_TESTING)
  assert(0 < input_len);
  assert(input_len <= MAX_SIMD_DEGREE * BLAKE3_CHUNK_LEN);
#endif

  const uint8_t *chunks_array[MAX_SIMD_DEGREE];
  size_t input_position = 0;
  size_t chunks_array_len = 0;
  while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
    chunks_array[chunks_array_len] = &input[input_position];
    input_position += BLAKE3_CHUNK_LEN;
    chunks_array_len += 1;
  }

  blake3_hash_many(chunks_array, chunks_array_len,
                   BLAKE3_CHUNK_LEN / BLAKE3_BLOCK_LEN, key, chunk_counter,
                   true, flags, CHUNK_START, CHUNK_END, out);

  // Hash the remaining partial chunk, if there is one. Note that the empty
  // chunk (meaning the empty message) is a different codepath.
  if (input_len > input_position) {
    uint64_t counter = chunk_counter + (uint64_t)chunks_array_len;
    blake3_chunk_state chunk_state;
    chunk_state_init(&chunk_state, key, flags);
    chunk_state.chunk_counter = counter;
    chunk_state_update(&chunk_state, &input[input_position],
                       input_len - input_position);
    output_t output = chunk_state_output(&chunk_state);
    output_chaining_value(&output, &out[chunks_array_len * BLAKE3_OUT_LEN]);
    return chunks_array_len + 1;
  } else {
    return chunks_array_len;
  }
}
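
// Worked example (not in the upstream file): an input of 2.5 chunks (2560
// bytes) collects 2 full chunks into chunks_array for a single
// blake3_hash_many call, then hashes the trailing 512 bytes through a
// temporary chunk_state, returning 3 chaining values in total.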

// Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
// on a single thread. Write out the parent chaining values and return the
// number of parents hashed. (If there's an odd input chaining value left over,
// return it as an additional output.) These parents are never the root and
// never empty; those cases use a different codepath.
INLINE size_t compress_parents_parallel(const uint8_t *child_chaining_values,
                                        size_t num_chaining_values,
                                        const uint32_t key[8], uint8_t flags,
                                        uint8_t *out) {
#if defined(BLAKE3_TESTING)
  assert(2 <= num_chaining_values);
  assert(num_chaining_values <= 2 * MAX_SIMD_DEGREE_OR_2);
#endif

  const uint8_t *parents_array[MAX_SIMD_DEGREE_OR_2];
  size_t parents_array_len = 0;
  while (num_chaining_values - (2 * parents_array_len) >= 2) {
    parents_array[parents_array_len] =
        &child_chaining_values[2 * parents_array_len * BLAKE3_OUT_LEN];
    parents_array_len += 1;
  }

  blake3_hash_many(parents_array, parents_array_len, 1, key,
                   0, // Parents always use counter 0.
                   false, flags | PARENT,
                   0, // Parents have no start flags.
                   0, // Parents have no end flags.
                   out);

  // If there's an odd child left over, it becomes an output.
  if (num_chaining_values > 2 * parents_array_len) {
    memcpy(&out[parents_array_len * BLAKE3_OUT_LEN],
           &child_chaining_values[2 * parents_array_len * BLAKE3_OUT_LEN],
           BLAKE3_OUT_LEN);
    return parents_array_len + 1;
  } else {
    return parents_array_len;
  }
}
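
// Worked example (not in the upstream file): 5 child chaining values yield 2
// compressed parents plus the odd fifth child copied through unchanged, for a
// return value of 3.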
253 
254 // The wide helper function returns (writes out) an array of chaining values
255 // and returns the length of that array. The number of chaining values returned
256 // is the dyanmically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
257 // if the input is shorter than that many chunks. The reason for maintaining a
258 // wide array of chaining values going back up the tree, is to allow the
259 // implementation to hash as many parents in parallel as possible.
260 //
261 // As a special case when the SIMD degree is 1, this function will still return
262 // at least 2 outputs. This guarantees that this function doesn't perform the
263 // root compression. (If it did, it would use the wrong flags, and also we
264 // wouldn't be able to implement exendable ouput.) Note that this function is
265 // not used when the whole input is only 1 chunk long; that's a different
266 // codepath.
267 //
268 // Why not just have the caller split the input on the first update(), instead
269 // of implementing this special rule? Because we don't want to limit SIMD or
270 // multi-threading parallelism for that update().
271 static size_t blake3_compress_subtree_wide(const uint8_t *input,
272  size_t input_len,
273  const uint32_t key[8],
274  uint64_t chunk_counter,
275  uint8_t flags, uint8_t *out) {
276  // Note that the single chunk case does *not* bump the SIMD degree up to 2
277  // when it is 1. If this implementation adds multi-threading in the future,
278  // this gives us the option of multi-threading even the 2-chunk case, which
279  // can help performance on smaller platforms.
280  if (input_len <= blake3_simd_degree() * BLAKE3_CHUNK_LEN) {
281  return compress_chunks_parallel(input, input_len, key, chunk_counter, flags,
282  out);
283  }
284 
285  // With more than simd_degree chunks, we need to recurse. Start by dividing
286  // the input into left and right subtrees. (Note that this is only optimal
287  // as long as the SIMD degree is a power of 2. If we ever get a SIMD degree
288  // of 3 or something, we'll need a more complicated strategy.)
289  size_t left_input_len = left_len(input_len);
290  size_t right_input_len = input_len - left_input_len;
291  const uint8_t *right_input = &input[left_input_len];
292  uint64_t right_chunk_counter =
293  chunk_counter + (uint64_t)(left_input_len / BLAKE3_CHUNK_LEN);
294 
295  // Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2 to
296  // account for the special case of returning 2 outputs when the SIMD degree
297  // is 1.
298  uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
299  size_t degree = blake3_simd_degree();
300  if (left_input_len > BLAKE3_CHUNK_LEN && degree == 1) {
301  // The special case: We always use a degree of at least two, to make
302  // sure there are two outputs. Except, as noted above, at the chunk
303  // level, where we allow degree=1. (Note that the 1-chunk-input case is
304  // a different codepath.)
305  degree = 2;
306  }
307  uint8_t *right_cvs = &cv_array[degree * BLAKE3_OUT_LEN];
308 
309  // Recurse! If this implementation adds multi-threading support in the
310  // future, this is where it will go.
311  size_t left_n = blake3_compress_subtree_wide(input, left_input_len, key,
312  chunk_counter, flags, cv_array);
313  size_t right_n = blake3_compress_subtree_wide(
314  right_input, right_input_len, key, right_chunk_counter, flags, right_cvs);
315 
316  // The special case again. If simd_degree=1, then we'll have left_n=1 and
317  // right_n=1. Rather than compressing them into a single output, return
318  // them directly, to make sure we always have at least two outputs.
319  if (left_n == 1) {
320  memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
321  return 2;
322  }
323 
324  // Otherwise, do one layer of parent node compression.
325  size_t num_chaining_values = left_n + right_n;
326  return compress_parents_parallel(cv_array, num_chaining_values, key, flags,
327  out);
328 }
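
// Worked example (not in the upstream file): with a SIMD degree of 4, a
// 6-chunk input splits into a 4-chunk left subtree (hashed in a single
// compress_chunks_parallel call) and a 2-chunk right subtree; the 4 + 2 = 6
// child CVs are then condensed into 3 parent CVs by
// compress_parents_parallel.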

// Hash a subtree with compress_subtree_wide(), and then condense the resulting
// list of chaining values down to a single parent node. Don't compress that
// last parent node, however. Instead, return its message bytes (the
// concatenated chaining values of its children). This is necessary when the
// first call to update() supplies a complete subtree, because the topmost
// parent node of that subtree could end up being the root. It's also necessary
// for extended output in the general case.
//
// As with compress_subtree_wide(), this function is not used on inputs of 1
// chunk or less. That's a different codepath.
INLINE void compress_subtree_to_parent_node(
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t out[2 * BLAKE3_OUT_LEN]) {
#if defined(BLAKE3_TESTING)
  assert(input_len > BLAKE3_CHUNK_LEN);
#endif

  uint8_t cv_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
  size_t num_cvs = blake3_compress_subtree_wide(input, input_len, key,
                                                chunk_counter, flags, cv_array);
  assert(num_cvs <= MAX_SIMD_DEGREE_OR_2);

  // If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
  // compress_subtree_wide() returns more than 2 chaining values. Condense
  // them into 2 by forming parent nodes repeatedly.
  uint8_t out_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN / 2];
  // The second half of this loop condition is always true, and we just
  // asserted it above. But GCC can't tell that it's always true, and if NDEBUG
  // is set on platforms where MAX_SIMD_DEGREE_OR_2 == 2, GCC emits spurious
  // warnings here. GCC 8.5 is particularly sensitive, so if you're changing
  // this code, test it against that version.
  while (num_cvs > 2 && num_cvs <= MAX_SIMD_DEGREE_OR_2) {
    num_cvs =
        compress_parents_parallel(cv_array, num_cvs, key, flags, out_array);
    memcpy(cv_array, out_array, num_cvs * BLAKE3_OUT_LEN);
  }
  memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
}

INLINE void hasher_init_base(blake3_hasher *self, const uint32_t key[8],
                             uint8_t flags) {
  memcpy(self->key, key, BLAKE3_KEY_LEN);
  chunk_state_init(&self->chunk, key, flags);
  self->cv_stack_len = 0;
}

void llvm_blake3_hasher_init(blake3_hasher *self) {
  hasher_init_base(self, IV, 0);
}

void llvm_blake3_hasher_init_keyed(blake3_hasher *self,
                                   const uint8_t key[BLAKE3_KEY_LEN]) {
  uint32_t key_words[8];
  load_key_words(key, key_words);
  hasher_init_base(self, key_words, KEYED_HASH);
}

void llvm_blake3_hasher_init_derive_key_raw(blake3_hasher *self,
                                            const void *context,
                                            size_t context_len) {
  blake3_hasher context_hasher;
  hasher_init_base(&context_hasher, IV, DERIVE_KEY_CONTEXT);
  llvm_blake3_hasher_update(&context_hasher, context, context_len);
  uint8_t context_key[BLAKE3_KEY_LEN];
  llvm_blake3_hasher_finalize(&context_hasher, context_key, BLAKE3_KEY_LEN);
  uint32_t context_key_words[8];
  load_key_words(context_key, context_key_words);
  hasher_init_base(self, context_key_words, DERIVE_KEY_MATERIAL);
}

void llvm_blake3_hasher_init_derive_key(blake3_hasher *self, const char *context) {
  llvm_blake3_hasher_init_derive_key_raw(self, context, strlen(context));
}
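
// A minimal key-derivation usage sketch (illustrative, not in the upstream
// file; the context string and the key_material buffer are placeholders):
//
//   blake3_hasher kdf;
//   uint8_t subkey[32];
//   llvm_blake3_hasher_init_derive_key(&kdf, "example.com 2024 session key");
//   llvm_blake3_hasher_update(&kdf, key_material, key_material_len);
//   llvm_blake3_hasher_finalize(&kdf, subkey, sizeof(subkey));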

// As described in hasher_push_cv() below, we do "lazy merging", delaying
// merges until right before the next CV is about to be added. This is
// different from the reference implementation. Another difference is that we
// aren't always merging 1 chunk at a time. Instead, each CV might represent
// any power-of-two number of chunks, as long as the smaller-above-larger stack
// order is maintained. Instead of the "count the trailing 0-bits" algorithm
// described in the spec, we use a "count the total number of 1-bits" variant
// that doesn't require us to retain the subtree size of the CV on top of the
// stack. The principle is the same: each CV that should remain in the stack is
// represented by a 1-bit in the total number of chunks (or bytes) so far.
INLINE void hasher_merge_cv_stack(blake3_hasher *self, uint64_t total_len) {
  size_t post_merge_stack_len = (size_t)popcnt(total_len);
  while (self->cv_stack_len > post_merge_stack_len) {
    uint8_t *parent_node =
        &self->cv_stack[(self->cv_stack_len - 2) * BLAKE3_OUT_LEN];
    output_t output = parent_output(parent_node, self->key, self->chunk.flags);
    output_chaining_value(&output, parent_node);
    self->cv_stack_len -= 1;
  }
}
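
// Worked example (not in the upstream file): after 6 chunks (6 = 0b110),
// popcnt gives 2, so the stack is merged down to exactly two CVs: one
// covering a 4-chunk subtree and one covering a 2-chunk subtree, mirroring
// the binary representation of the chunk count.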

// In reference_impl.rs, we merge the new CV with existing CVs from the stack
// before pushing it. We can do that because we know more input is coming, so
// we know none of the merges are root.
//
// This setting is different. We want to feed as much input as possible to
// compress_subtree_wide(), without setting aside anything for the chunk_state.
// If the user gives us 64 KiB, we want to parallelize over all 64 KiB at once
// as a single subtree, if at all possible.
//
// This leads to two problems:
// 1) This 64 KiB input might be the only call that ever gets made to update.
//    In this case, the root node of the 64 KiB subtree would be the root node
//    of the whole tree, and it would need to be ROOT finalized. We can't
//    compress it until we know.
// 2) This 64 KiB input might complete a larger tree, whose root node is
//    similarly going to be the root of the whole tree. For example, maybe
//    we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress the
//    node at the root of the 256 KiB subtree until we know how to finalize it.
//
// The second problem is solved with "lazy merging". That is, when we're about
// to add a CV to the stack, we don't merge it with anything first, as the
// reference impl does. Instead we do merges using the *previous* CV that was
// added, which is sitting on top of the stack, and we put the new CV
// (unmerged) on top of the stack afterwards. This guarantees that we never
// merge the root node until finalize().
//
// Solving the first problem requires an additional tool,
// compress_subtree_to_parent_node(). That function always returns the top
// *two* chaining values of the subtree it's compressing. We then do lazy
// merging with each of them separately, so that the second CV will always
// remain unmerged. (That also helps us support extendable output when we're
// hashing an input all-at-once.)
INLINE void hasher_push_cv(blake3_hasher *self, uint8_t new_cv[BLAKE3_OUT_LEN],
                           uint64_t chunk_counter) {
  hasher_merge_cv_stack(self, chunk_counter);
  memcpy(&self->cv_stack[self->cv_stack_len * BLAKE3_OUT_LEN], new_cv,
         BLAKE3_OUT_LEN);
  self->cv_stack_len += 1;
}
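
// Worked example of lazy merging (not in the upstream file): pushing the CV
// for chunk 4 (chunk_counter = 4 = 0b100, popcnt 1) first merges the CVs for
// chunks 0-3 already on the stack down to a single 4-chunk CV, then pushes
// chunk 4's CV unmerged on top.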

void llvm_blake3_hasher_update(blake3_hasher *self, const void *input,
                               size_t input_len) {
  // Explicitly checking for zero avoids causing UB by passing a null pointer
  // to memcpy. This comes up in practice with things like:
  //   std::vector<uint8_t> v;
  //   blake3_hasher_update(&hasher, v.data(), v.size());
  if (input_len == 0) {
    return;
  }

  const uint8_t *input_bytes = (const uint8_t *)input;

  // If we have some partial chunk bytes in the internal chunk_state, we need
  // to finish that chunk first.
  if (chunk_state_len(&self->chunk) > 0) {
    size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&self->chunk);
    if (take > input_len) {
      take = input_len;
    }
    chunk_state_update(&self->chunk, input_bytes, take);
    input_bytes += take;
    input_len -= take;
    // If we've filled the current chunk and there's more coming, finalize this
    // chunk and proceed. In this case we know it's not the root.
    if (input_len > 0) {
      output_t output = chunk_state_output(&self->chunk);
      uint8_t chunk_cv[32];
      output_chaining_value(&output, chunk_cv);
      hasher_push_cv(self, chunk_cv, self->chunk.chunk_counter);
      chunk_state_reset(&self->chunk, self->key, self->chunk.chunk_counter + 1);
    } else {
      return;
    }
  }

  // Now the chunk_state is clear, and we have more input. If there's more than
  // a single chunk (so, definitely not the root chunk), hash the largest whole
  // subtree we can, with the full benefits of SIMD (and maybe in the future,
  // multi-threading) parallelism. Two restrictions:
  // - The subtree has to be a power-of-2 number of chunks. Only subtrees along
  //   the right edge can be incomplete, and we don't know where the right edge
  //   is going to be until we get to finalize().
  // - The subtree must evenly divide the total number of chunks up until this
  //   point (if total is not 0). If the current incomplete subtree is only
  //   waiting for 1 more chunk, we can't hash a subtree of 4 chunks. We have
  //   to complete the current subtree first.
  // Because we might need to break up the input to form powers of 2, or to
  // evenly divide what we already have, this part runs in a loop.
  while (input_len > BLAKE3_CHUNK_LEN) {
    size_t subtree_len = round_down_to_power_of_2(input_len);
    uint64_t count_so_far = self->chunk.chunk_counter * BLAKE3_CHUNK_LEN;
    // Shrink the subtree_len until it evenly divides the count so far. We know
    // that subtree_len itself is a power of 2, so we can use a bitmasking
    // trick instead of an actual remainder operation. (Note that if the caller
    // consistently passes power-of-2 inputs of the same size, as is hopefully
    // typical, this loop condition will always fail, and subtree_len will
    // always be the full length of the input.)
    //
    // An aside: We don't have to shrink subtree_len quite this much. For
    // example, if count_so_far is 1, we could pass 2 chunks to
    // compress_subtree_to_parent_node. Since we'll get 2 CVs back, we'll still
    // get the right answer in the end, and we might get to use 2-way SIMD
    // parallelism. The problem with this optimization is that it gets us
    // stuck always hashing 2 chunks. The total number of chunks will remain
    // odd, and we'll never graduate to higher degrees of parallelism. See
    // https://github.com/BLAKE3-team/BLAKE3/issues/69.
    while ((((uint64_t)(subtree_len - 1)) & count_so_far) != 0) {
      subtree_len /= 2;
    }
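
    // Worked example of the shrinking loop (not in the upstream file): if 3
    // chunks have been hashed so far (count_so_far = 3 * 1024 = 3072) and 8
    // chunks of input arrive, subtree_len starts at 8192; 8191 & 3072 != 0,
    // 4095 & 3072 != 0, and 2047 & 3072 != 0, so subtree_len shrinks to 1024,
    // and the next chunk is hashed alone to complete the current 4-chunk
    // subtree before larger subtrees become possible again.
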
    // The shrunken subtree_len might now be 1 chunk long. If so, hash that one
    // chunk by itself. Otherwise, compress the subtree into a pair of CVs.
    uint64_t subtree_chunks = subtree_len / BLAKE3_CHUNK_LEN;
    if (subtree_len <= BLAKE3_CHUNK_LEN) {
      blake3_chunk_state chunk_state;
      chunk_state_init(&chunk_state, self->key, self->chunk.flags);
      chunk_state.chunk_counter = self->chunk.chunk_counter;
      chunk_state_update(&chunk_state, input_bytes, subtree_len);
      output_t output = chunk_state_output(&chunk_state);
      uint8_t cv[BLAKE3_OUT_LEN];
      output_chaining_value(&output, cv);
      hasher_push_cv(self, cv, chunk_state.chunk_counter);
    } else {
      // This is the high-performance happy path, though getting here depends
      // on the caller giving us a long enough input.
      uint8_t cv_pair[2 * BLAKE3_OUT_LEN];
      compress_subtree_to_parent_node(input_bytes, subtree_len, self->key,
                                      self->chunk.chunk_counter,
                                      self->chunk.flags, cv_pair);
      hasher_push_cv(self, cv_pair, self->chunk.chunk_counter);
      hasher_push_cv(self, &cv_pair[BLAKE3_OUT_LEN],
                     self->chunk.chunk_counter + (subtree_chunks / 2));
    }
    self->chunk.chunk_counter += subtree_chunks;
    input_bytes += subtree_len;
    input_len -= subtree_len;
  }

  // If there's any remaining input less than a full chunk, add it to the chunk
  // state. In that case, also do a final merge loop to make sure the subtree
  // stack doesn't contain any unmerged pairs. The remaining input means we
  // know these merges are non-root. This merge loop isn't strictly necessary
  // here, because hasher_push_cv already does its own merge loop, but it
  // simplifies blake3_hasher_finalize below.
  if (input_len > 0) {
    chunk_state_update(&self->chunk, input_bytes, input_len);
    hasher_merge_cv_stack(self, self->chunk.chunk_counter);
  }
}

void llvm_blake3_hasher_finalize(const blake3_hasher *self, uint8_t *out,
                                 size_t out_len) {
  llvm_blake3_hasher_finalize_seek(self, 0, out, out_len);
#if LLVM_MEMORY_SANITIZER_BUILD
  // Avoid false positives due to uninstrumented assembly code.
  __msan_unpoison(out, out_len);
#endif
}
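
// A minimal end-to-end usage sketch (illustrative, not in the upstream file;
// the data buffer is a placeholder):
//
//   blake3_hasher hasher;
//   uint8_t digest[BLAKE3_OUT_LEN];
//   llvm_blake3_hasher_init(&hasher);
//   llvm_blake3_hasher_update(&hasher, data, data_len);
//   llvm_blake3_hasher_finalize(&hasher, digest, sizeof(digest));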

void llvm_blake3_hasher_finalize_seek(const blake3_hasher *self, uint64_t seek,
                                      uint8_t *out, size_t out_len) {
  // Explicitly checking for zero avoids causing UB by passing a null pointer
  // to memcpy. This comes up in practice with things like:
  //   std::vector<uint8_t> v;
  //   blake3_hasher_finalize(&hasher, v.data(), v.size());
  if (out_len == 0) {
    return;
  }

  // If the subtree stack is empty, then the current chunk is the root.
  if (self->cv_stack_len == 0) {
    output_t output = chunk_state_output(&self->chunk);
    output_root_bytes(&output, seek, out, out_len);
    return;
  }
  // If there are any bytes in the chunk state, finalize that chunk and do a
  // roll-up merge between that chunk hash and every subtree in the stack. In
  // this case, the extra merge loop at the end of blake3_hasher_update
  // guarantees that none of the subtrees in the stack need to be merged with
  // each other first. Otherwise, if there are no bytes in the chunk state,
  // then the top of the stack is a chunk hash, and we start the merge from
  // that.
  output_t output;
  size_t cvs_remaining;
  if (chunk_state_len(&self->chunk) > 0) {
    cvs_remaining = self->cv_stack_len;
    output = chunk_state_output(&self->chunk);
  } else {
    // There are always at least 2 CVs in the stack in this case.
    cvs_remaining = self->cv_stack_len - 2;
    output = parent_output(&self->cv_stack[cvs_remaining * 32], self->key,
                           self->chunk.flags);
  }
  while (cvs_remaining > 0) {
    cvs_remaining -= 1;
    uint8_t parent_block[BLAKE3_BLOCK_LEN];
    memcpy(parent_block, &self->cv_stack[cvs_remaining * 32], 32);
    output_chaining_value(&output, &parent_block[32]);
    output = parent_output(parent_block, self->key, self->chunk.flags);
  }
  output_root_bytes(&output, seek, out, out_len);
}

void llvm_blake3_hasher_reset(blake3_hasher *self) {
  chunk_state_reset(&self->chunk, self->key, 0);
  self->cv_stack_len = 0;
}