FreeBSD/Linux Kernel Cross Reference
sys/dev/random/fenestrasX/fx_pool.c


/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2019 Conrad Meyer <cem@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/random.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/taskqueue.h>

#include <machine/atomic.h>
#include <machine/smp.h>

#include <dev/random/randomdev.h>
#include <dev/random/random_harvestq.h>

#include <dev/random/fenestrasX/fx_brng.h>
#include <dev/random/fenestrasX/fx_hash.h>
#include <dev/random/fenestrasX/fx_pool.h>
#include <dev/random/fenestrasX/fx_priv.h>
#include <dev/random/fenestrasX/fx_pub.h>

/*
 * Timer-based reseed interval growth factor and limit in seconds. (§ 3.2)
 */
#define FXENT_RESEED_INTVL_GFACT       3
#define FXENT_RESEED_INTVL_MAX          3600
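/*
 * Concretely: the interval starts at one second (see fxent_timer_reseed()
 * below) and grows 1 -> 3 -> 9 -> 27 -> 81 -> 243 -> 729 -> 2187 seconds,
 * after which it is clamped to FXENT_RESEED_INTVL_MAX (3600) seconds.
 */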

/*
 * Pool reseed schedule.  Initially, only pool 0 is active.  Until the timer
 * interval reaches INTVL_MAX, only pool 0 is used.
 *
 * After reaching INTVL_MAX, pool k is either activated (if inactive) or used
 * (if active) every 3^k timer reseeds.  (§ 3.3)
 *
 * (Entropy harvesting only round robins across active pools.)
 */
#define FXENT_RESEED_BASE               3
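/*
 * Worked example: timer reseeds 1 and 2 draw from pool 0 alone; reseed 3
 * (3^1) activates pool 1; reseed 9 (3^2) activates pool 2; thereafter,
 * pool k participates in every 3^k-th reseed.  See the base-3
 * trailing-zeros loop in fxent_timer_reseed() below.
 */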

/*
 * Number of bytes from high quality sources to allocate to pool 0 before
 * normal round-robin allocation after each timer reseed. (§ 3.4)
 */
#define FXENT_HI_SRC_POOL0_BYTES        32
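/*
 * E.g., once multiple pools are active, the first 32 bytes harvested from a
 * given high-quality source (say, RDRAND) after each timer reseed go to
 * pool 0; further bytes from that source resume round-robin distribution
 * across the active pools.
 */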

/*
 * § 3.1
 *
 * Low sources provide unconditioned entropy, such as mouse movements; high
 * sources are assumed to provide high-quality random bytes.  Pull sources are
 * those which can be polled, i.e., anything randomdev calls a "random_source."
 *
 * In the whitepaper, low sources are pull.  For us, at least in the existing
 * design, low-quality sources push into some global ring buffer and then get
 * forwarded into the RNG by a thread that continually polls.  Presumably their
 * design batches low entropy signals in some way (SHA512?) and only requests
 * them dynamically on reseed.  I'm not sure what the benefit is vs. feeding
 * into the pools directly.
 */
enum fxrng_ent_access_cls {
        FXRNG_PUSH,
        FXRNG_PULL,
};
enum fxrng_ent_source_cls {
        FXRNG_HI,
        FXRNG_LO,
        FXRNG_GARBAGE,
};
struct fxrng_ent_cls {
        enum fxrng_ent_access_cls       entc_axx_cls;
        enum fxrng_ent_source_cls       entc_src_cls;
};

static const struct fxrng_ent_cls fxrng_hi_pull = {
        .entc_axx_cls = FXRNG_PULL,
        .entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_hi_push = {
        .entc_axx_cls = FXRNG_PUSH,
        .entc_src_cls = FXRNG_HI,
};
static const struct fxrng_ent_cls fxrng_lo_push = {
        .entc_axx_cls = FXRNG_PUSH,
        .entc_src_cls = FXRNG_LO,
};
static const struct fxrng_ent_cls fxrng_garbage = {
        .entc_axx_cls = FXRNG_PUSH,
        .entc_src_cls = FXRNG_GARBAGE,
};

/*
 * This table is a mapping of randomdev's current source abstractions to the
 * designations above; at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 */
static const struct fxrng_ent_char {
        const struct fxrng_ent_cls      *entc_cls;
} fxrng_ent_char[ENTROPYSOURCE] = {
        [RANDOM_CACHED] = {
                .entc_cls = &fxrng_hi_push,
        },
        [RANDOM_ATTACH] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_KEYBOARD] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_MOUSE] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_NET_TUN] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_NET_ETHER] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_NET_NG] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_INTERRUPT] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_SWI] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_FS_ATIME] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_UMA] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_CALLOUT] = {
                .entc_cls = &fxrng_lo_push,
        },
        [RANDOM_PURE_OCTEON] = {
                .entc_cls = &fxrng_hi_push,     /* Could be made pull. */
        },
        [RANDOM_PURE_SAFE] = {
                .entc_cls = &fxrng_hi_push,
        },
        [RANDOM_PURE_GLXSB] = {
                .entc_cls = &fxrng_hi_push,
        },
        [RANDOM_PURE_HIFN] = {
                .entc_cls = &fxrng_hi_push,
        },
        [RANDOM_PURE_RDRAND] = {
                .entc_cls = &fxrng_hi_pull,
        },
        [RANDOM_PURE_NEHEMIAH] = {
                .entc_cls = &fxrng_hi_pull,
        },
        [RANDOM_PURE_RNDTEST] = {
                .entc_cls = &fxrng_garbage,
        },
        [RANDOM_PURE_VIRTIO] = {
                .entc_cls = &fxrng_hi_pull,
        },
        [RANDOM_PURE_BROADCOM] = {
                .entc_cls = &fxrng_hi_push,
        },
        [RANDOM_PURE_CCP] = {
                .entc_cls = &fxrng_hi_pull,
        },
        [RANDOM_PURE_DARN] = {
                .entc_cls = &fxrng_hi_pull,
        },
        [RANDOM_PURE_TPM] = {
                .entc_cls = &fxrng_hi_push,
        },
        [RANDOM_PURE_VMGENID] = {
                .entc_cls = &fxrng_hi_push,
        },
};

/* Useful for single-bit-per-source state. */
BITSET_DEFINE(fxrng_bits, ENTROPYSOURCE);

/* XXX Borrowed from not-yet-committed D22702. */
#ifndef BIT_TEST_SET_ATOMIC_ACQ
#define BIT_TEST_SET_ATOMIC_ACQ(_s, n, p)       \
        (atomic_testandset_acq_long(            \
            &(p)->__bits[__bitset_word((_s), (n))], (n)) != 0)
#endif
#define FXENT_TEST_SET_ATOMIC_ACQ(n, p) \
        BIT_TEST_SET_ATOMIC_ACQ(ENTROPYSOURCE, n, p)
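/*
 * I.e., atomically set bit 'n' of bitset 'p' (with acquire semantics) and
 * return whether it was already set; a false result identifies the first
 * event from a given source in fxrng_event_processor() below.
 */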

/* For special behavior on first-time entropy sources. (§ 3.1) */
static struct fxrng_bits __read_mostly fxrng_seen;

/* For special behavior for high-entropy sources after a reseed. (§ 3.4) */
_Static_assert(FXENT_HI_SRC_POOL0_BYTES <= UINT8_MAX, "");
static uint8_t __read_mostly fxrng_reseed_seen[ENTROPYSOURCE];

/* Entropy pools.  Lock order is ENT -> RNG(root) -> RNG(leaf). */
static struct mtx fxent_pool_lk;
MTX_SYSINIT(fx_pool, &fxent_pool_lk, "fx entropy pool lock", MTX_DEF);
#define FXENT_LOCK()            mtx_lock(&fxent_pool_lk)
#define FXENT_UNLOCK()          mtx_unlock(&fxent_pool_lk)
#define FXENT_ASSERT(rng)       mtx_assert(&fxent_pool_lk, MA_OWNED)
#define FXENT_ASSERT_NOT(rng)   mtx_assert(&fxent_pool_lk, MA_NOTOWNED)
static struct fxrng_hash fxent_pool[FXRNG_NPOOLS];
static unsigned __read_mostly fxent_nactpools = 1;
static struct timeout_task fxent_reseed_timer;
static int __read_mostly fxent_timer_ready;

/*
 * Track number of bytes of entropy harvested from high-quality sources prior
 * to initial keying.  The idea is to collect more jitter entropy when fewer
 * high-quality bytes were available and less if we had other good sources.  We
 * want to provide always-on availability but don't necessarily have *any*
 * great sources on some platforms.
 *
 * Like fxrng_ent_char: at some point, if the design seems reasonable, it would
 * make more sense to pull this up into the abstraction layer instead.
 *
 * Jitter entropy is unimplemented for now.
 */
static unsigned long fxrng_preseed_ent;

void
fxrng_pools_init(void)
{
        size_t i;

        for (i = 0; i < nitems(fxent_pool); i++)
                fxrng_hash_init(&fxent_pool[i]);
}

static inline bool
fxrng_hi_source(enum random_entropy_source src)
{
        return (fxrng_ent_char[src].entc_cls->entc_src_cls == FXRNG_HI);
}

/*
 * A racy check that this high-entropy source's event should contribute to
 * pool0 on the basis of per-source byte count.  The check is racy for two
 * reasons:
 *   - Performance: The vast majority of the time, we've already taken 32 bytes
 *     from any present high quality source and the racy check lets us avoid
 *     dirtying the cache for the global array.
 *   - Correctness: It's fine that the check is racy.  The failure modes are:
 *     • False positive: We will detect when we take the lock.
 *     • False negative: We still collect the entropy; it just won't be
 *       preferentially placed in pool0 in this case.
 */
static inline bool
fxrng_hi_pool0_eligible_racy(enum random_entropy_source src)
{
        return (atomic_load_acq_8(&fxrng_reseed_seen[src]) <
            FXENT_HI_SRC_POOL0_BYTES);
}

/*
 * Top level entropy processing API from randomdev.
 *
 * Invoked by the core randomdev subsystem for preload entropy, "push"
 * sources (like interrupts, keyboard, etc.), and "pull" sources (RDRAND,
 * etc.).
 */
void
fxrng_event_processor(struct harvest_event *event)
{
        enum random_entropy_source src;
        unsigned pool;
        bool first_time, first_32;

        src = event->he_source;

        ASSERT_DEBUG(event->he_size <= sizeof(event->he_entropy),
            "%s: he_size: %u > sizeof(he_entropy): %zu", __func__,
            (unsigned)event->he_size, sizeof(event->he_entropy));

        /*
         * Zero bytes of source entropy doesn't count as observing this source
         * for the first time.  We still harvest the counter entropy.
         */
        first_time = event->he_size > 0 &&
            !FXENT_TEST_SET_ATOMIC_ACQ(src, &fxrng_seen);
        if (__predict_false(first_time)) {
                /*
                 * "The first time [any source] provides entropy, it is used to
                 * directly reseed the root PRNG.  The entropy pools are
                 * bypassed." (§ 3.1)
                 *
                 * Unlike Windows, we cannot rely on loader(8) seed material
                 * being present, so we perform initial keying in the kernel.
                 * We use brng_generation 0 to represent an unkeyed state.
                 *
                 * Prior to initial keying, it doesn't make sense to try to mix
                 * the entropy directly with the root PRNG state, as the root
                 * PRNG is unkeyed.  Instead, we collect pre-keying dynamic
                 * entropy in pool0 and do not bump the root PRNG seed version
                 * or set its key.  Initial keying will incorporate pool0 and
                 * bump the brng_generation (seed version).
                 *
                 * After initial keying, we do directly mix in first-time
                 * entropy sources.  We use the root BRNG to generate 32 bytes
                 * and use fxrng_hash to mix it with the new entropy source and
                 * re-key with the first 256 bits of hash output.
                 */
                FXENT_LOCK();
                FXRNG_BRNG_LOCK(&fxrng_root);
                if (__predict_true(fxrng_root.brng_generation > 0)) {
                        /* Bypass the pools: */
                        FXENT_UNLOCK();
                        fxrng_brng_src_reseed(event);
                        FXRNG_BRNG_ASSERT_NOT(&fxrng_root);
                        return;
                }

                /*
                 * Keying the root PRNG requires both FXENT_LOCK and the PRNG's
                 * lock, so we only need to hold on to the pool lock to prevent
                 * initial keying without this entropy.
                 */
                FXRNG_BRNG_UNLOCK(&fxrng_root);

                /* Root PRNG hasn't been keyed yet, just accumulate event. */
                fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
                    sizeof(event->he_somecounter));
                fxrng_hash_update(&fxent_pool[0], event->he_entropy,
                    event->he_size);

                if (fxrng_hi_source(src)) {
                        /* Prevent overflow. */
                        if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
                                fxrng_preseed_ent += event->he_size;
                }
                FXENT_UNLOCK();
                return;
        }
        /* !first_time */

        /*
         * "The first 32 bytes produced by a high entropy source after a reseed
         * from the pools is always put in pool 0." (§ 3.4)
         *
         * The first-32-byte tracking data in fxrng_reseed_seen is reset in
         * fxent_timer_reseed_npools() below.
         */
        first_32 = event->he_size > 0 &&
            fxrng_hi_source(src) &&
            atomic_load_acq_int(&fxent_nactpools) > 1 &&
            fxrng_hi_pool0_eligible_racy(src);
        if (__predict_false(first_32)) {
                unsigned rem, seen;

                FXENT_LOCK();
                seen = fxrng_reseed_seen[src];
                if (seen == FXENT_HI_SRC_POOL0_BYTES)
                        goto round_robin;

                rem = FXENT_HI_SRC_POOL0_BYTES - seen;
                rem = MIN(rem, event->he_size);

                fxrng_reseed_seen[src] = seen + rem;

                /*
                 * We put 'rem' bytes in pool0, and any remaining bytes are
                 * round-robin'd across other pools.
                 */
                fxrng_hash_update(&fxent_pool[0],
                    ((uint8_t *)event->he_entropy) + event->he_size - rem,
                    rem);
                if (rem == event->he_size) {
                        fxrng_hash_update(&fxent_pool[0], &event->he_somecounter,
                            sizeof(event->he_somecounter));
                        FXENT_UNLOCK();
                        return;
                }

                /*
                 * If fewer bytes were needed than this event provided, we only
                 * take the last 'rem' bytes of the entropy buffer and leave
                 * the timecounter to be round-robin'd with the remaining
                 * entropy.
                 */
                event->he_size -= rem;
                goto round_robin;
        }
        /* !first_32 */

        FXENT_LOCK();

round_robin:
        FXENT_ASSERT();
        pool = event->he_destination % fxent_nactpools;
        fxrng_hash_update(&fxent_pool[pool], event->he_entropy,
            event->he_size);
        fxrng_hash_update(&fxent_pool[pool], &event->he_somecounter,
            sizeof(event->he_somecounter));

        if (__predict_false(fxrng_hi_source(src) &&
            atomic_load_acq_64(&fxrng_root_generation) == 0)) {
                /* Prevent overflow. */
                if (fxrng_preseed_ent <= ULONG_MAX - event->he_size)
                        fxrng_preseed_ent += event->he_size;
        }
        FXENT_UNLOCK();
}

/*
 * Top level "seeded" API/signal from randomdev.
 *
 * This is our warning that a request is coming: we need to be seeded.  In
 * fenestrasX, a request for random bytes _never_ fails.  "We (ed: ditto) have
 * observed that there are many callers that never check for the error code,
 * even if they are generating cryptographic key material." (§ 1.6)
 *
 * If we returned 'false', both read_random(9) and chacha20_randomstir()
 * (arc4random(9)) would blindly charge on with something almost certainly
 * worse than what we've got, or are able to get quickly enough.
 */
bool
fxrng_alg_seeded(void)
{
        uint8_t hash[FXRNG_HASH_SZ];
        sbintime_t sbt;

        /* The vast majority of the time, we expect to already be seeded. */
        if (__predict_true(atomic_load_acq_64(&fxrng_root_generation) != 0))
                return (true);

        /*
         * Take the lock and recheck; only one thread needs to do the initial
         * seeding work.
         */
        FXENT_LOCK();
        if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
                FXENT_UNLOCK();
                return (true);
        }
        /* XXX Any one-off initial seeding goes here. */

        fxrng_hash_finish(&fxent_pool[0], hash, sizeof(hash));
        fxrng_hash_init(&fxent_pool[0]);

        fxrng_brng_reseed(hash, sizeof(hash));
        FXENT_UNLOCK();

        randomdev_unblock();
        explicit_bzero(hash, sizeof(hash));

        /*
         * This may be called too early for taskqueue_thread to be initialized.
         * fxent_pool_timer_init will detect if we've already unblocked and
         * queue the first timer reseed at that point.
         */
        if (atomic_load_acq_int(&fxent_timer_ready) != 0) {
                sbt = SBT_1S;
                taskqueue_enqueue_timeout_sbt(taskqueue_thread,
                    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
        }
        return (true);
}

/*
 * Timer-based reseeds and pool expansion.
 */
static void
fxent_timer_reseed_npools(unsigned n)
{
        /*
         * 64 * 8 => moderately large 512 bytes.  Could be static, as we are
         * only used in a static context.  On the other hand, this is in
         * taskqueue TASK context and we're likely nearly at top of stack
         * already.
         */
        uint8_t hash[FXRNG_HASH_SZ * FXRNG_NPOOLS];
        unsigned i;

        ASSERT_DEBUG(n > 0 && n <= FXRNG_NPOOLS, "n:%u", n);

        FXENT_ASSERT();
        /*
         * Collect entropy from pools 0..n-1 by concatenating the output hashes
         * and then feeding them into fxrng_brng_reseed, which will hash the
         * aggregate together with the current root PRNG keystate to produce a
         * new key.  It will also bump the global generation counter
         * appropriately.
         */
        for (i = 0; i < n; i++) {
                fxrng_hash_finish(&fxent_pool[i], hash + i * FXRNG_HASH_SZ,
                    FXRNG_HASH_SZ);
                fxrng_hash_init(&fxent_pool[i]);
        }

        fxrng_brng_reseed(hash, n * FXRNG_HASH_SZ);
        explicit_bzero(hash, n * FXRNG_HASH_SZ);

        /*
         * "The first 32 bytes produced by a high entropy source after a reseed
         * from the pools is always put in pool 0." (§ 3.4)
         *
         * So here we reset the tracking (somewhat naively given the majority
         * of sources on most machines are not what we consider "high", but at
         * 32 bytes it's smaller than a cache line), so the next 32 bytes are
         * prioritized into pool0.
         *
         * See corresponding use of fxrng_reseed_seen in fxrng_event_processor.
         */
        memset(fxrng_reseed_seen, 0, sizeof(fxrng_reseed_seen));
        FXENT_ASSERT();
}

static void
fxent_timer_reseed(void *ctx __unused, int pending __unused)
{
        static unsigned reseed_intvl_sec = 1;
        /* Only reseeds after FXENT_RESEED_INTVL_MAX is achieved. */
        static uint64_t reseed_number = 1;

        unsigned next_ival, i, k;
        sbintime_t sbt;

        if (reseed_intvl_sec < FXENT_RESEED_INTVL_MAX) {
                next_ival = FXENT_RESEED_INTVL_GFACT * reseed_intvl_sec;
                if (next_ival > FXENT_RESEED_INTVL_MAX)
                        next_ival = FXENT_RESEED_INTVL_MAX;
                FXENT_LOCK();
                fxent_timer_reseed_npools(1);
                FXENT_UNLOCK();
        } else {
                /*
                 * The creation of entropy pools beyond 0 is enabled when the
                 * reseed interval hits the maximum. (§ 3.3)
                 */
                next_ival = reseed_intvl_sec;

                /*
                 * Pool 0 is used every reseed; pools 1..0 every 3rd reseed;
                 * and in general, pools n..0 every 3^n reseeds.
                 */
                k = reseed_number;
                reseed_number++;

                /* Count how many pools, from [0, i), to use for reseed. */
                for (i = 1; i < MIN(fxent_nactpools + 1, FXRNG_NPOOLS); i++) {
                        if ((k % FXENT_RESEED_BASE) != 0)
                                break;
                        k /= FXENT_RESEED_BASE;
                }

                /*
                 * If we haven't activated pool i yet, activate it and only
                 * reseed from [0, i-1).  (§ 3.3)
                 */
                FXENT_LOCK();
                if (i == fxent_nactpools + 1) {
                        fxent_timer_reseed_npools(fxent_nactpools);
                        fxent_nactpools++;
                } else {
                        /* Just reseed from [0, i). */
                        fxent_timer_reseed_npools(i);
                }
                FXENT_UNLOCK();
        }

        /* Schedule the next reseed. */
        sbt = next_ival * SBT_1S;
        taskqueue_enqueue_timeout_sbt(taskqueue_thread, &fxent_reseed_timer,
            -sbt, (sbt / 3), C_PREL(2));

        reseed_intvl_sec = next_ival;
}

static void
fxent_pool_timer_init(void *dummy __unused)
{
        sbintime_t sbt;

        TIMEOUT_TASK_INIT(taskqueue_thread, &fxent_reseed_timer, 0,
            fxent_timer_reseed, NULL);

        if (atomic_load_acq_64(&fxrng_root_generation) != 0) {
                sbt = SBT_1S;
                taskqueue_enqueue_timeout_sbt(taskqueue_thread,
                    &fxent_reseed_timer, -sbt, (sbt / 3), C_PREL(2));
        }
        atomic_store_rel_int(&fxent_timer_ready, 1);
}
/* After taskqueue_thread is initialized in SI_SUB_TASKQ:SI_ORDER_SECOND. */
SYSINIT(fxent_pool_timer_init, SI_SUB_TASKQ, SI_ORDER_ANY,
    fxent_pool_timer_init, NULL);
