FreeBSD/Linux Kernel Cross Reference
sys/contrib/openzfs/module/icp/algs/blake3/blake3.c

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Based on BLAKE3 v1.3.1, https://github.com/BLAKE3-team/BLAKE3
 * Copyright (c) 2019-2020 Samuel Neves and Jack O'Connor
 * Copyright (c) 2021-2022 Tino Reichardt <milky-zfs@mcmilk.de>
 */

#include <sys/zfs_context.h>
#include <sys/blake3.h>

#include "blake3_impl.h"

/*
 * blake3_compress_subtree_wide() needs about 1056 bytes of stack;
 * this pragma keeps gcc from warning about the large stack frame.
 */
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wframe-larger-than="
#endif

/* used internally */
typedef struct {
        uint32_t input_cv[8];
        uint64_t counter;
        uint8_t block[BLAKE3_BLOCK_LEN];
        uint8_t block_len;
        uint8_t flags;
} output_t;

/* internal flags */
enum blake3_flags {
        CHUNK_START             = 1 << 0,
        CHUNK_END               = 1 << 1,
        PARENT                  = 1 << 2,
        ROOT                    = 1 << 3,
        KEYED_HASH              = 1 << 4,
        DERIVE_KEY_CONTEXT      = 1 << 5,
        DERIVE_KEY_MATERIAL     = 1 << 6,
};

/* internal helpers */
static void chunk_state_init(blake3_chunk_state_t *ctx,
    const uint32_t key[8], uint8_t flags)
{
        memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
        ctx->chunk_counter = 0;
        memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
        ctx->buf_len = 0;
        ctx->blocks_compressed = 0;
        ctx->flags = flags;
}

static void chunk_state_reset(blake3_chunk_state_t *ctx,
    const uint32_t key[8], uint64_t chunk_counter)
{
        memcpy(ctx->cv, key, BLAKE3_KEY_LEN);
        ctx->chunk_counter = chunk_counter;
        ctx->blocks_compressed = 0;
        memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
        ctx->buf_len = 0;
}

static size_t chunk_state_len(const blake3_chunk_state_t *ctx)
{
        return (BLAKE3_BLOCK_LEN * (size_t)ctx->blocks_compressed) +
            ((size_t)ctx->buf_len);
}

static size_t chunk_state_fill_buf(blake3_chunk_state_t *ctx,
    const uint8_t *input, size_t input_len)
{
        size_t take = BLAKE3_BLOCK_LEN - ((size_t)ctx->buf_len);
        if (take > input_len) {
                take = input_len;
        }
        uint8_t *dest = ctx->buf + ((size_t)ctx->buf_len);
        memcpy(dest, input, take);
        ctx->buf_len += (uint8_t)take;
        return (take);
}

static uint8_t chunk_state_maybe_start_flag(const blake3_chunk_state_t *ctx)
{
        if (ctx->blocks_compressed == 0) {
                return (CHUNK_START);
        } else {
                return (0);
        }
}

static output_t make_output(const uint32_t input_cv[8],
    const uint8_t *block, uint8_t block_len,
    uint64_t counter, uint8_t flags)
{
        output_t ret;
        memcpy(ret.input_cv, input_cv, 32);
        memcpy(ret.block, block, BLAKE3_BLOCK_LEN);
        ret.block_len = block_len;
        ret.counter = counter;
        ret.flags = flags;
        return (ret);
}

/*
 * Chaining values within a given chunk (specifically the compress_in_place
 * interface) are represented as words. This avoids unnecessary bytes<->words
 * conversion overhead in the portable implementation. However, the hash_many
 * interface handles both user input and parent node blocks, so it accepts
 * bytes. For that reason, chaining values in the CV stack are represented as
 * bytes.
 */
static void output_chaining_value(const blake3_ops_t *ops,
    const output_t *ctx, uint8_t cv[32])
{
        uint32_t cv_words[8];
        memcpy(cv_words, ctx->input_cv, 32);
        ops->compress_in_place(cv_words, ctx->block, ctx->block_len,
            ctx->counter, ctx->flags);
        store_cv_words(cv, cv_words);
}

static void output_root_bytes(const blake3_ops_t *ops, const output_t *ctx,
    uint64_t seek, uint8_t *out, size_t out_len)
{
        uint64_t output_block_counter = seek / 64;
        size_t offset_within_block = seek % 64;
        uint8_t wide_buf[64];
        while (out_len > 0) {
                ops->compress_xof(ctx->input_cv, ctx->block, ctx->block_len,
                    output_block_counter, ctx->flags | ROOT, wide_buf);
                size_t available_bytes = 64 - offset_within_block;
                size_t memcpy_len;
                if (out_len > available_bytes) {
                        memcpy_len = available_bytes;
                } else {
                        memcpy_len = out_len;
                }
                memcpy(out, wide_buf + offset_within_block, memcpy_len);
                out += memcpy_len;
                out_len -= memcpy_len;
                output_block_counter += 1;
                offset_within_block = 0;
        }
}
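
/*
 * Worked example of the seek math above: with seek = 100, output starts
 * at XOF block 1 (100 / 64) and at offset 36 within it (100 % 64). A
 * request for 40 output bytes then copies the last 28 bytes of block 1
 * and the first 12 bytes of block 2.
 */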

static void chunk_state_update(const blake3_ops_t *ops,
    blake3_chunk_state_t *ctx, const uint8_t *input, size_t input_len)
{
        if (ctx->buf_len > 0) {
                size_t take = chunk_state_fill_buf(ctx, input, input_len);
                input += take;
                input_len -= take;
                if (input_len > 0) {
                        ops->compress_in_place(ctx->cv, ctx->buf,
                            BLAKE3_BLOCK_LEN, ctx->chunk_counter,
                            ctx->flags|chunk_state_maybe_start_flag(ctx));
                        ctx->blocks_compressed += 1;
                        ctx->buf_len = 0;
                        memset(ctx->buf, 0, BLAKE3_BLOCK_LEN);
                }
        }

        while (input_len > BLAKE3_BLOCK_LEN) {
                ops->compress_in_place(ctx->cv, input, BLAKE3_BLOCK_LEN,
                    ctx->chunk_counter,
                    ctx->flags|chunk_state_maybe_start_flag(ctx));
                ctx->blocks_compressed += 1;
                input += BLAKE3_BLOCK_LEN;
                input_len -= BLAKE3_BLOCK_LEN;
        }

        chunk_state_fill_buf(ctx, input, input_len);
}

static output_t chunk_state_output(const blake3_chunk_state_t *ctx)
{
        uint8_t block_flags =
            ctx->flags | chunk_state_maybe_start_flag(ctx) | CHUNK_END;
        return (make_output(ctx->cv, ctx->buf, ctx->buf_len, ctx->chunk_counter,
            block_flags));
}

static output_t parent_output(const uint8_t block[BLAKE3_BLOCK_LEN],
    const uint32_t key[8], uint8_t flags)
{
        return (make_output(key, block, BLAKE3_BLOCK_LEN, 0, flags | PARENT));
}

/*
 * Given some input larger than one chunk, return the number of bytes that
 * should go in the left subtree. This is the largest power-of-2 number of
 * chunks that leaves at least 1 byte for the right subtree.
 */
static size_t left_len(size_t content_len)
{
        /*
         * Subtract 1 to reserve at least one byte for the right side.
         * content_len should always be greater than BLAKE3_CHUNK_LEN.
         */
        size_t full_chunks = (content_len - 1) / BLAKE3_CHUNK_LEN;
        return (round_down_to_power_of_2(full_chunks) * BLAKE3_CHUNK_LEN);
}
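
/*
 * Worked example, assuming BLAKE3_CHUNK_LEN is 1024: for content_len =
 * 3 * 1024 + 100 = 3172, full_chunks = 3171 / 1024 = 3, which rounds
 * down to the power of 2 below it, 2. The left subtree therefore gets
 * 2048 bytes and the right subtree gets the remaining 1124.
 */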

/*
 * Use SIMD parallelism to hash up to MAX_SIMD_DEGREE chunks at the same time
 * on a single thread. Write out the chunk chaining values and return the
 * number of chunks hashed. These chunks are never the root and never empty;
 * those cases use a different codepath.
 */
static size_t compress_chunks_parallel(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
        const uint8_t *chunks_array[MAX_SIMD_DEGREE];
        size_t input_position = 0;
        size_t chunks_array_len = 0;
        while (input_len - input_position >= BLAKE3_CHUNK_LEN) {
                chunks_array[chunks_array_len] = &input[input_position];
                input_position += BLAKE3_CHUNK_LEN;
                chunks_array_len += 1;
        }

        ops->hash_many(chunks_array, chunks_array_len, BLAKE3_CHUNK_LEN /
            BLAKE3_BLOCK_LEN, key, chunk_counter, B_TRUE, flags, CHUNK_START,
            CHUNK_END, out);

        /*
         * Hash the remaining partial chunk, if there is one. Note that the
         * empty chunk (meaning the empty message) is a different codepath.
         */
        if (input_len > input_position) {
                uint64_t counter = chunk_counter + (uint64_t)chunks_array_len;
                blake3_chunk_state_t chunk_state;
                chunk_state_init(&chunk_state, key, flags);
                chunk_state.chunk_counter = counter;
                chunk_state_update(ops, &chunk_state, &input[input_position],
                    input_len - input_position);
                output_t output = chunk_state_output(&chunk_state);
                output_chaining_value(ops, &output, &out[chunks_array_len *
                    BLAKE3_OUT_LEN]);
                return (chunks_array_len + 1);
        } else {
                return (chunks_array_len);
        }
}

/*
 * Use SIMD parallelism to hash up to MAX_SIMD_DEGREE parents at the same time
 * on a single thread. Write out the parent chaining values and return the
 * number of parents hashed. (If there's an odd input chaining value left over,
 * return it as an additional output.) These parents are never the root and
 * never empty; those cases use a different codepath.
 */
static size_t compress_parents_parallel(const blake3_ops_t *ops,
    const uint8_t *child_chaining_values, size_t num_chaining_values,
    const uint32_t key[8], uint8_t flags, uint8_t *out)
{
        const uint8_t *parents_array[MAX_SIMD_DEGREE_OR_2] = {0};
        size_t parents_array_len = 0;

        while (num_chaining_values - (2 * parents_array_len) >= 2) {
                parents_array[parents_array_len] = &child_chaining_values[2 *
                    parents_array_len * BLAKE3_OUT_LEN];
                parents_array_len += 1;
        }

        ops->hash_many(parents_array, parents_array_len, 1, key, 0, B_FALSE,
            flags | PARENT, 0, 0, out);

        /* If there's an odd child left over, it becomes an output. */
        if (num_chaining_values > 2 * parents_array_len) {
                memcpy(&out[parents_array_len * BLAKE3_OUT_LEN],
                    &child_chaining_values[2 * parents_array_len *
                    BLAKE3_OUT_LEN], BLAKE3_OUT_LEN);
                return (parents_array_len + 1);
        } else {
                return (parents_array_len);
        }
}
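
/*
 * Worked example: with num_chaining_values = 5, the loop above pairs
 * CVs 0+1 and 2+3 into two parent outputs, the odd CV 4 is copied
 * through unmodified, and the function returns 3.
 */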

/*
 * The wide helper function returns (writes out) an array of chaining values
 * and returns the length of that array. The number of chaining values returned
 * is the dynamically detected SIMD degree, at most MAX_SIMD_DEGREE. Or fewer,
 * if the input is shorter than that many chunks. The reason for maintaining a
 * wide array of chaining values going back up the tree is to allow the
 * implementation to hash as many parents in parallel as possible.
 *
 * As a special case when the SIMD degree is 1, this function will still return
 * at least 2 outputs. This guarantees that this function doesn't perform the
 * root compression. (If it did, it would use the wrong flags, and also we
 * wouldn't be able to implement extendable output.) Note that this function is
 * not used when the whole input is only 1 chunk long; that's a different
 * codepath.
 *
 * Why not just have the caller split the input on the first update(), instead
 * of implementing this special rule? Because we don't want to limit SIMD or
 * multi-threading parallelism for that update().
 */
static size_t blake3_compress_subtree_wide(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t *out)
{
        /*
         * Note that the single chunk case does *not* bump the SIMD degree up
         * to 2 when it is 1. If this implementation adds multi-threading in
         * the future, this gives us the option of multi-threading even the
         * 2-chunk case, which can help performance on smaller platforms.
         */
        if (input_len <= (size_t)(ops->degree * BLAKE3_CHUNK_LEN)) {
                return (compress_chunks_parallel(ops, input, input_len, key,
                    chunk_counter, flags, out));
        }

        /*
         * With more than simd_degree chunks, we need to recurse. Start by
         * dividing the input into left and right subtrees. (Note that this is
         * only optimal as long as the SIMD degree is a power of 2. If we ever
         * get a SIMD degree of 3 or something, we'll need a more complicated
         * strategy.)
         */
        size_t left_input_len = left_len(input_len);
        size_t right_input_len = input_len - left_input_len;
        const uint8_t *right_input = &input[left_input_len];
        uint64_t right_chunk_counter = chunk_counter +
            (uint64_t)(left_input_len / BLAKE3_CHUNK_LEN);

        /*
         * Make space for the child outputs. Here we use MAX_SIMD_DEGREE_OR_2
         * to account for the special case of returning 2 outputs when the
         * SIMD degree is 1.
         */
        uint8_t cv_array[2 * MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
        size_t degree = ops->degree;
        if (left_input_len > BLAKE3_CHUNK_LEN && degree == 1) {
                /*
                 * The special case: We always use a degree of at least two,
                 * to make sure there are two outputs. Except, as noted above,
                 * at the chunk level, where we allow degree=1. (Note that the
                 * 1-chunk-input case is a different codepath.)
                 */
                degree = 2;
        }
        uint8_t *right_cvs = &cv_array[degree * BLAKE3_OUT_LEN];

        /*
         * Recurse! If this implementation adds multi-threading support in the
         * future, this is where it will go.
         */
        size_t left_n = blake3_compress_subtree_wide(ops, input, left_input_len,
            key, chunk_counter, flags, cv_array);
        size_t right_n = blake3_compress_subtree_wide(ops, right_input,
            right_input_len, key, right_chunk_counter, flags, right_cvs);

        /*
         * The special case again. If simd_degree=1, then we'll have left_n=1
         * and right_n=1. Rather than compressing them into a single output,
         * return them directly, to make sure we always have at least two
         * outputs.
         */
        if (left_n == 1) {
                memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
                return (2);
        }

        /* Otherwise, do one layer of parent node compression. */
        size_t num_chaining_values = left_n + right_n;
        return (compress_parents_parallel(ops, cv_array,
            num_chaining_values, key, flags, out));
}
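
/*
 * Worked example: with ops->degree = 4 and a 10-chunk input, left_len()
 * splits it 8/2. The left recursion splits again into 4/4, hashes each
 * half with compress_chunks_parallel(), and compresses those 8 chunk CVs
 * into 4 parent CVs. The right recursion hashes its 2 chunks directly.
 * The final parent layer then compresses the 4 + 2 = 6 CVs into 3 parent
 * CVs and returns 3.
 */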

/*
 * Hash a subtree with compress_subtree_wide(), and then condense the resulting
 * list of chaining values down to a single parent node. Don't compress that
 * last parent node, however. Instead, return its message bytes (the
 * concatenated chaining values of its children). This is necessary when the
 * first call to update() supplies a complete subtree, because the topmost
 * parent node of that subtree could end up being the root. It's also necessary
 * for extended output in the general case.
 *
 * As with compress_subtree_wide(), this function is not used on inputs of 1
 * chunk or less. That's a different codepath.
 */
static void compress_subtree_to_parent_node(const blake3_ops_t *ops,
    const uint8_t *input, size_t input_len, const uint32_t key[8],
    uint64_t chunk_counter, uint8_t flags, uint8_t out[2 * BLAKE3_OUT_LEN])
{
        uint8_t cv_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN];
        size_t num_cvs = blake3_compress_subtree_wide(ops, input, input_len,
            key, chunk_counter, flags, cv_array);

        /*
         * If MAX_SIMD_DEGREE is greater than 2 and there's enough input,
         * compress_subtree_wide() returns more than 2 chaining values. Condense
         * them into 2 by forming parent nodes repeatedly.
         */
        uint8_t out_array[MAX_SIMD_DEGREE_OR_2 * BLAKE3_OUT_LEN / 2];
        while (num_cvs > 2) {
                num_cvs = compress_parents_parallel(ops, cv_array, num_cvs, key,
                    flags, out_array);
                memcpy(cv_array, out_array, num_cvs * BLAKE3_OUT_LEN);
        }
        memcpy(out, cv_array, 2 * BLAKE3_OUT_LEN);
}

static void hasher_init_base(BLAKE3_CTX *ctx, const uint32_t key[8],
    uint8_t flags)
{
        memcpy(ctx->key, key, BLAKE3_KEY_LEN);
        chunk_state_init(&ctx->chunk, key, flags);
        ctx->cv_stack_len = 0;
        ctx->ops = blake3_impl_get_ops();
}

/*
 * As described in hasher_push_cv() below, we do "lazy merging", delaying
 * merges until right before the next CV is about to be added. This is
 * different from the reference implementation. Another difference is that we
 * aren't always merging 1 chunk at a time. Instead, each CV might represent
 * any power-of-two number of chunks, as long as the smaller-above-larger
 * stack order is maintained. Instead of the "count the trailing 0-bits"
 * algorithm described in the spec, we use a "count the total number of
 * 1-bits" variant that doesn't require us to retain the subtree size of the
 * CV on top of the stack. The principle is the same: each CV that should
 * remain in the stack is represented by a 1-bit in the total number of chunks
 * (or bytes) so far.
 */
static void hasher_merge_cv_stack(BLAKE3_CTX *ctx, uint64_t total_len)
{
        size_t post_merge_stack_len = (size_t)popcnt(total_len);
        while (ctx->cv_stack_len > post_merge_stack_len) {
                uint8_t *parent_node =
                    &ctx->cv_stack[(ctx->cv_stack_len - 2) * BLAKE3_OUT_LEN];
                output_t output =
                    parent_output(parent_node, ctx->key, ctx->chunk.flags);
                output_chaining_value(ctx->ops, &output, parent_node);
                ctx->cv_stack_len -= 1;
        }
}
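
/*
 * Worked example: after 6 total chunks (binary 110), popcnt() returns 2,
 * so the stack should hold exactly two CVs: one for a 4-chunk subtree
 * and one for a 2-chunk subtree. If a third CV is on top when the next
 * CV is about to be pushed, the loop above merges the top two into their
 * parent until only 2 remain.
 */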

/*
 * In reference_impl.rs, we merge the new CV with existing CVs from the stack
 * before pushing it. We can do that because we know more input is coming, so
 * we know none of the merges are root.
 *
 * This setting is different. We want to feed as much input as possible to
 * compress_subtree_wide(), without setting aside anything for the chunk_state.
 * If the user gives us 64 KiB, we want to parallelize over all 64 KiB at once
 * as a single subtree, if at all possible.
 *
 * This leads to two problems:
 * 1) This 64 KiB input might be the only call that ever gets made to update.
 *    In this case, the root node of the 64 KiB subtree would be the root node
 *    of the whole tree, and it would need to be ROOT finalized. We can't
 *    compress it until we know.
 * 2) This 64 KiB input might complete a larger tree, whose root node is
 *    similarly going to be the root of the whole tree. For example, maybe
 *    we have 192 KiB (that is, 128 + 64) hashed so far. We can't compress the
 *    node at the root of the 256 KiB subtree until we know how to finalize it.
 *
 * The second problem is solved with "lazy merging". That is, when we're about
 * to add a CV to the stack, we don't merge it with anything first, as the
 * reference impl does. Instead we do merges using the *previous* CV that was
 * added, which is sitting on top of the stack, and we put the new CV
 * (unmerged) on top of the stack afterwards. This guarantees that we never
 * merge the root node until finalize().
 *
 * Solving the first problem requires an additional tool,
 * compress_subtree_to_parent_node(). That function always returns the top
 * *two* chaining values of the subtree it's compressing. We then do lazy
 * merging with each of them separately, so that the second CV will always
 * remain unmerged. (That also helps us support extendable output when we're
 * hashing an input all-at-once.)
 */
static void hasher_push_cv(BLAKE3_CTX *ctx, uint8_t new_cv[BLAKE3_OUT_LEN],
    uint64_t chunk_counter)
{
        hasher_merge_cv_stack(ctx, chunk_counter);
        memcpy(&ctx->cv_stack[ctx->cv_stack_len * BLAKE3_OUT_LEN], new_cv,
            BLAKE3_OUT_LEN);
        ctx->cv_stack_len += 1;
}

void
Blake3_Init(BLAKE3_CTX *ctx)
{
        hasher_init_base(ctx, BLAKE3_IV, 0);
}

void
Blake3_InitKeyed(BLAKE3_CTX *ctx, const uint8_t key[BLAKE3_KEY_LEN])
{
        uint32_t key_words[8];
        load_key_words(key, key_words);
        hasher_init_base(ctx, key_words, KEYED_HASH);
}

static void
Blake3_Update2(BLAKE3_CTX *ctx, const void *input, size_t input_len)
{
        /*
         * Explicitly checking for zero avoids causing UB by passing a null
         * pointer to memcpy. This comes up in practice with things like:
         *   std::vector<uint8_t> v;
         *   blake3_hasher_update(&hasher, v.data(), v.size());
         */
        if (input_len == 0) {
                return;
        }

        const uint8_t *input_bytes = (const uint8_t *)input;

        /*
         * If we have some partial chunk bytes in the internal chunk_state, we
         * need to finish that chunk first.
         */
        if (chunk_state_len(&ctx->chunk) > 0) {
                size_t take = BLAKE3_CHUNK_LEN - chunk_state_len(&ctx->chunk);
                if (take > input_len) {
                        take = input_len;
                }
                chunk_state_update(ctx->ops, &ctx->chunk, input_bytes, take);
                input_bytes += take;
                input_len -= take;
                /*
                 * If we've filled the current chunk and there's more coming,
                 * finalize this chunk and proceed. In this case we know it's
                 * not the root.
                 */
                if (input_len > 0) {
                        output_t output = chunk_state_output(&ctx->chunk);
                        uint8_t chunk_cv[32];
                        output_chaining_value(ctx->ops, &output, chunk_cv);
                        hasher_push_cv(ctx, chunk_cv, ctx->chunk.chunk_counter);
                        chunk_state_reset(&ctx->chunk, ctx->key,
                            ctx->chunk.chunk_counter + 1);
                } else {
                        return;
                }
        }

        /*
         * Now the chunk_state is clear, and we have more input. If there's
         * more than a single chunk (so, definitely not the root chunk), hash
         * the largest whole subtree we can, with the full benefits of SIMD
         * (and maybe in the future, multi-threading) parallelism. Two
         * restrictions:
         * - The subtree has to be a power-of-2 number of chunks. Only
         *   subtrees along the right edge can be incomplete, and we don't know
         *   where the right edge is going to be until we get to finalize().
         * - The subtree must evenly divide the total number of chunks up
         *   until this point (if total is not 0). If the current incomplete
         *   subtree is only waiting for 1 more chunk, we can't hash a subtree
         *   of 4 chunks. We have to complete the current subtree first.
         * Because we might need to break up the input to form powers of 2, or
         * to evenly divide what we already have, this part runs in a loop.
         */
        while (input_len > BLAKE3_CHUNK_LEN) {
                size_t subtree_len = round_down_to_power_of_2(input_len);
                uint64_t count_so_far =
                    ctx->chunk.chunk_counter * BLAKE3_CHUNK_LEN;
                /*
                 * Shrink the subtree_len until it evenly divides the count so
                 * far. We know that subtree_len itself is a power of 2, so we
                 * can use a bitmasking trick instead of an actual remainder
                 * operation. (Note that if the caller consistently passes
                 * power-of-2 inputs of the same size, as is hopefully
                 * typical, this loop condition will always fail, and
                 * subtree_len will always be the full length of the input.)
                 *
                 * An aside: We don't have to shrink subtree_len quite this
                 * much. For example, if count_so_far is 1, we could pass 2
                 * chunks to compress_subtree_to_parent_node. Since we'll get
                 * 2 CVs back, we'll still get the right answer in the end,
                 * and we might get to use 2-way SIMD parallelism. The problem
                 * with this optimization is that it gets us stuck always
                 * hashing 2 chunks. The total number of chunks will remain
                 * odd, and we'll never graduate to higher degrees of
                 * parallelism. See
                 * https://github.com/BLAKE3-team/BLAKE3/issues/69.
                 */
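                /*
                 * Worked example, assuming BLAKE3_CHUNK_LEN is 1024: with
                 * 1 chunk hashed so far (count_so_far = 1024) and 4096
                 * bytes of new input, subtree_len starts at 4096;
                 * (4095 & 1024) != 0 shrinks it to 2048, (2047 & 1024)
                 * != 0 shrinks it to 1024, and (1023 & 1024) == 0 stops
                 * the loop, so a single chunk is hashed and the chunk
                 * count becomes even.
                 */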
                while ((((uint64_t)(subtree_len - 1)) & count_so_far) != 0) {
                        subtree_len /= 2;
                }
                /*
                 * The shrunken subtree_len might now be 1 chunk long. If so,
                 * hash that one chunk by itself. Otherwise, compress the
                 * subtree into a pair of CVs.
                 */
                uint64_t subtree_chunks = subtree_len / BLAKE3_CHUNK_LEN;
                if (subtree_len <= BLAKE3_CHUNK_LEN) {
                        blake3_chunk_state_t chunk_state;
                        chunk_state_init(&chunk_state, ctx->key,
                            ctx->chunk.flags);
                        chunk_state.chunk_counter = ctx->chunk.chunk_counter;
                        chunk_state_update(ctx->ops, &chunk_state, input_bytes,
                            subtree_len);
                        output_t output = chunk_state_output(&chunk_state);
                        uint8_t cv[BLAKE3_OUT_LEN];
                        output_chaining_value(ctx->ops, &output, cv);
                        hasher_push_cv(ctx, cv, chunk_state.chunk_counter);
                } else {
                        /*
                         * This is the high-performance happy path, though
                         * getting here depends on the caller giving us a long
                         * enough input.
                         */
                        uint8_t cv_pair[2 * BLAKE3_OUT_LEN];
                        compress_subtree_to_parent_node(ctx->ops, input_bytes,
                            subtree_len, ctx->key, ctx->chunk.chunk_counter,
                            ctx->chunk.flags, cv_pair);
                        hasher_push_cv(ctx, cv_pair, ctx->chunk.chunk_counter);
                        hasher_push_cv(ctx, &cv_pair[BLAKE3_OUT_LEN],
                            ctx->chunk.chunk_counter + (subtree_chunks / 2));
                }
                ctx->chunk.chunk_counter += subtree_chunks;
                input_bytes += subtree_len;
                input_len -= subtree_len;
        }

        /*
         * If there's any remaining input less than a full chunk, add it to
         * the chunk state. In that case, also do a final merge loop to make
         * sure the subtree stack doesn't contain any unmerged pairs. The
         * remaining input means we know these merges are non-root. This merge
         * loop isn't strictly necessary here, because hasher_push_cv()
         * already does its own merge loop, but it simplifies
         * Blake3_FinalSeek() below.
         */
        if (input_len > 0) {
                chunk_state_update(ctx->ops, &ctx->chunk, input_bytes,
                    input_len);
                hasher_merge_cv_stack(ctx, ctx->chunk.chunk_counter);
        }
}

void
Blake3_Update(BLAKE3_CTX *ctx, const void *input, size_t todo)
{
        size_t done = 0;
        const uint8_t *data = input;
        const size_t block_max = 1024 * 64;

        /* Feed at most 64 KiB at a time to keep the stack usage small. */
        while (todo != 0) {
                size_t block = (todo >= block_max) ? block_max : todo;
                Blake3_Update2(ctx, data + done, block);
                done += block;
                todo -= block;
        }
}

void
Blake3_Final(const BLAKE3_CTX *ctx, uint8_t *out)
{
        Blake3_FinalSeek(ctx, 0, out, BLAKE3_OUT_LEN);
}

void
Blake3_FinalSeek(const BLAKE3_CTX *ctx, uint64_t seek, uint8_t *out,
    size_t out_len)
{
        /*
         * Explicitly checking for zero avoids causing UB by passing a null
         * pointer to memcpy. This comes up in practice with things like:
         *   std::vector<uint8_t> v;
         *   blake3_hasher_finalize(&hasher, v.data(), v.size());
         */
        if (out_len == 0) {
                return;
        }
        /* If the subtree stack is empty, then the current chunk is the root. */
        if (ctx->cv_stack_len == 0) {
                output_t output = chunk_state_output(&ctx->chunk);
                output_root_bytes(ctx->ops, &output, seek, out, out_len);
                return;
        }
        /*
         * If there are any bytes in the chunk state, finalize that chunk and
         * do a roll-up merge between that chunk hash and every subtree in the
         * stack. In this case, the extra merge loop at the end of
         * Blake3_Update2() guarantees that none of the subtrees in the
         * stack need to be merged with each other first. Otherwise, if there
         * are no bytes in the chunk state, then the top of the stack is a
         * chunk hash, and we start the merge from that.
         */
        output_t output;
        size_t cvs_remaining;
        if (chunk_state_len(&ctx->chunk) > 0) {
                cvs_remaining = ctx->cv_stack_len;
                output = chunk_state_output(&ctx->chunk);
        } else {
                /* There are always at least 2 CVs in the stack in this case. */
                cvs_remaining = ctx->cv_stack_len - 2;
                output = parent_output(&ctx->cv_stack[cvs_remaining * 32],
                    ctx->key, ctx->chunk.flags);
        }
        while (cvs_remaining > 0) {
                cvs_remaining -= 1;
                uint8_t parent_block[BLAKE3_BLOCK_LEN];
                memcpy(parent_block, &ctx->cv_stack[cvs_remaining * 32], 32);
                output_chaining_value(ctx->ops, &output, &parent_block[32]);
                output = parent_output(parent_block, ctx->key,
                    ctx->chunk.flags);
        }
        output_root_bytes(ctx->ops, &output, seek, out, out_len);
}
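
/*
 * For reference, a minimal usage sketch of the API above. hash_buffer()
 * is a hypothetical illustration, not part of this file; it assumes only
 * Blake3_Init(), Blake3_Update(), Blake3_Final(), and the BLAKE3_OUT_LEN
 * constant from sys/blake3.h. Blake3_Update() may be called any number
 * of times before Blake3_Final() writes the 32-byte digest:
 *
 *      static void
 *      hash_buffer(const void *data, size_t len,
 *          uint8_t digest[BLAKE3_OUT_LEN])
 *      {
 *              BLAKE3_CTX ctx;
 *
 *              Blake3_Init(&ctx);
 *              Blake3_Update(&ctx, data, len);
 *              Blake3_Final(&ctx, digest);
 *      }
 */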
