FreeBSD/Linux Kernel Cross Reference
sys/kern/kern_entropy.c

    1 /*      $NetBSD: kern_entropy.c,v 1.57 2022/08/05 23:43:46 riastradh Exp $      */
    2 
    3 /*-
    4  * Copyright (c) 2019 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Taylor R. Campbell.
    9  *
   10  * Redistribution and use in source and binary forms, with or without
   11  * modification, are permitted provided that the following conditions
   12  * are met:
   13  * 1. Redistributions of source code must retain the above copyright
   14  *    notice, this list of conditions and the following disclaimer.
   15  * 2. Redistributions in binary form must reproduce the above copyright
   16  *    notice, this list of conditions and the following disclaimer in the
   17  *    documentation and/or other materials provided with the distribution.
   18  *
   19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   29  * POSSIBILITY OF SUCH DAMAGE.
   30  */
   31 
   32 /*
   33  * Entropy subsystem
   34  *
   35  *      * Each CPU maintains a per-CPU entropy pool so that gathering
   36  *        entropy requires no interprocessor synchronization, except
   37  *        early at boot when we may be scrambling to gather entropy as
   38  *        soon as possible.
   39  *
   40  *        - entropy_enter gathers entropy and never drops it on the
   41  *          floor, at the cost of sometimes having to do cryptography.
   42  *
   43  *        - entropy_enter_intr gathers entropy or drops it on the
   44  *          floor, with low latency.  Work to stir the pool or kick the
   45  *          housekeeping thread is scheduled in soft interrupts.
   46  *
   47  *      * entropy_enter immediately enters into the global pool if it
   48  *        can transition to full entropy in one swell foop.  Otherwise,
   49  *        it defers to a housekeeping thread that consolidates entropy,
   50  *        but only when the CPUs collectively have full entropy, in
   51  *        order to mitigate iterative-guessing attacks.
   52  *
   53  *      * The entropy housekeeping thread continues to consolidate
   54  *        entropy even after we think we have full entropy, in case we
   55  *        are wrong, but is limited to one discretionary consolidation
   56  *        per minute, and only when new entropy is actually coming in,
   57  *        to limit performance impact.
   58  *
   59  *      * The entropy epoch is the number that changes when we
   60  *        transition from partial entropy to full entropy, so that
   61  *        users can easily determine when to reseed.  This also
   62  *        facilitates an operator explicitly causing everything to
   63  *        reseed by sysctl -w kern.entropy.consolidate=1.
   64  *
   65  *      * No entropy estimation based on the sample values, which is a
   66  *        contradiction in terms and a potential source of side
   67  *        channels.  It is the responsibility of the driver author to
   68  *        study how predictable the physical source of input can ever
   69  *        be, and to furnish a lower bound on the amount of entropy it
   70  *        has.
   71  *
   72  *      * Entropy depletion is available for testing (or if you're into
   73  *        that sort of thing), with sysctl -w kern.entropy.depletion=1;
   74  *        the logic to support it is small, to minimize chance of bugs.
   75  */
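/*
 * Usage model for a driver that furnishes entropy (a minimal sketch;
 * mydev_softc, mydev_attach, and mydev_intr are hypothetical names,
 * but rnd_attach_source and rnd_add_data are the same rndsource(9)
 * calls used for the seed source later in this file):
 *
 *              static struct krndsource mydev_rndsource;
 *
 *              void
 *              mydev_attach(struct mydev_softc *sc)
 *              {
 *                      rnd_attach_source(&mydev_rndsource, "mydev",
 *                          RND_TYPE_UNKNOWN, RND_FLAG_COLLECT_VALUE);
 *              }
 *
 *              void
 *              mydev_intr(struct mydev_softc *sc, uint32_t sample)
 *              {
 *                      rnd_add_data(&mydev_rndsource, &sample,
 *                          sizeof sample, 0);
 *              }
 *
 * Passing 0 bits is the conservative estimate when the driver author
 * has not established a lower bound on the source's unpredictability.
 */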
   76 
   77 #include <sys/cdefs.h>
   78 __KERNEL_RCSID(0, "$NetBSD: kern_entropy.c,v 1.57 2022/08/05 23:43:46 riastradh Exp $");
   79 
   80 #include <sys/param.h>
   81 #include <sys/types.h>
   82 #include <sys/atomic.h>
   83 #include <sys/compat_stub.h>
   84 #include <sys/condvar.h>
   85 #include <sys/cpu.h>
   86 #include <sys/entropy.h>
   87 #include <sys/errno.h>
   88 #include <sys/evcnt.h>
   89 #include <sys/event.h>
   90 #include <sys/file.h>
   91 #include <sys/intr.h>
   92 #include <sys/kauth.h>
   93 #include <sys/kernel.h>
   94 #include <sys/kmem.h>
   95 #include <sys/kthread.h>
   96 #include <sys/lwp.h>
   97 #include <sys/module_hook.h>
   98 #include <sys/mutex.h>
   99 #include <sys/percpu.h>
  100 #include <sys/poll.h>
  101 #include <sys/proc.h>
  102 #include <sys/queue.h>
  103 #include <sys/reboot.h>
  104 #include <sys/rnd.h>            /* legacy kernel API */
  105 #include <sys/rndio.h>          /* userland ioctl interface */
  106 #include <sys/rndsource.h>      /* kernel rndsource driver API */
  107 #include <sys/select.h>
  108 #include <sys/selinfo.h>
  109 #include <sys/sha1.h>           /* for boot seed checksum */
  110 #include <sys/stdint.h>
  111 #include <sys/sysctl.h>
  112 #include <sys/syslog.h>
  113 #include <sys/systm.h>
  114 #include <sys/time.h>
  115 #include <sys/xcall.h>
  116 
  117 #include <lib/libkern/entpool.h>
  118 
  119 #include <machine/limits.h>
  120 
  121 #ifdef __HAVE_CPU_COUNTER
  122 #include <machine/cpu_counter.h>
  123 #endif
  124 
  125 /*
  126  * struct entropy_cpu
  127  *
  128  *      Per-CPU entropy state.  The pool is allocated separately
  129  *      because percpu(9) sometimes moves per-CPU objects around
  130  *      without zeroing them, which would lead to unwanted copies of
  131  *      sensitive secrets.  The evcnt is allocated separately because
  132  *      evcnt(9) assumes it stays put in memory.
  133  */
  134 struct entropy_cpu {
  135         struct entropy_cpu_evcnt {
  136                 struct evcnt            softint;
  137                 struct evcnt            intrdrop;
  138                 struct evcnt            intrtrunc;
  139         }                       *ec_evcnt;
  140         struct entpool          *ec_pool;
  141         unsigned                ec_pending;
  142         bool                    ec_locked;
  143 };
  144 
  145 /*
  146  * struct entropy_cpu_lock
  147  *
  148  *      State for locking the per-CPU entropy state.
  149  */
  150 struct entropy_cpu_lock {
  151         int             ecl_s;
  152         uint64_t        ecl_ncsw;
  153 };
  154 
  155 /*
  156  * struct rndsource_cpu
  157  *
  158  *      Per-CPU rndsource state.
  159  */
  160 struct rndsource_cpu {
  161         unsigned                rc_entropybits;
  162         unsigned                rc_timesamples;
  163         unsigned                rc_datasamples;
  164 };
  165 
  166 /*
  167  * entropy_global (a.k.a. E for short in this file)
  168  *
  169  *      Global entropy state.  Writes protected by the global lock.
  170  *      Some fields, marked (A), can be read outside the lock, and are
  171  *      maintained with atomic_load/store_relaxed.
  172  */
  173 struct {
  174         kmutex_t        lock;           /* covers all global state */
  175         struct entpool  pool;           /* global pool for extraction */
  176         unsigned        needed;         /* (A) needed globally */
  177         unsigned        pending;        /* (A) pending in per-CPU pools */
  178         unsigned        timestamp;      /* (A) time of last consolidation */
  179         unsigned        epoch;          /* (A) changes when needed -> 0 */
  180         kcondvar_t      cv;             /* notifies state changes */
  181         struct selinfo  selq;           /* notifies needed -> 0 */
  182         struct lwp      *sourcelock;    /* lock on list of sources */
  183         kcondvar_t      sourcelock_cv;  /* notifies sourcelock release */
  184         LIST_HEAD(,krndsource) sources; /* list of entropy sources */
  185         enum entropy_stage {
  186                 ENTROPY_COLD = 0, /* single-threaded */
  187                 ENTROPY_WARM,     /* multi-threaded at boot before CPUs */
  188                 ENTROPY_HOT,      /* multi-threaded multi-CPU */
  189         }               stage;
  190         bool            consolidate;    /* kick thread to consolidate */
  191         bool            seed_rndsource; /* true if seed source is attached */
  192         bool            seeded;         /* true if seed file already loaded */
  193 } entropy_global __cacheline_aligned = {
  194         /* Fields that must be initialized when the kernel is loaded.  */
  195         .needed = ENTROPY_CAPACITY*NBBY,
  196         .epoch = (unsigned)-1,  /* -1 means entropy never consolidated */
  197         .sources = LIST_HEAD_INITIALIZER(entropy_global.sources),
  198         .stage = ENTROPY_COLD,
  199 };
  200 
  201 #define E       (&entropy_global)       /* declutter */
  202 
  203 /* Read-mostly globals */
  204 static struct percpu    *entropy_percpu __read_mostly; /* struct entropy_cpu */
  205 static void             *entropy_sih __read_mostly; /* softint handler */
  206 static struct lwp       *entropy_lwp __read_mostly; /* housekeeping thread */
  207 
  208 static struct krndsource seed_rndsource __read_mostly;
  209 
  210 /*
  211  * Event counters
  212  *
  213  *      Must be careful with adding these because they can serve as
  214  *      side channels.
  215  */
  216 static struct evcnt entropy_discretionary_evcnt =
  217     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "discretionary");
  218 EVCNT_ATTACH_STATIC(entropy_discretionary_evcnt);
  219 static struct evcnt entropy_immediate_evcnt =
  220     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "immediate");
  221 EVCNT_ATTACH_STATIC(entropy_immediate_evcnt);
  222 static struct evcnt entropy_partial_evcnt =
  223     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "partial");
  224 EVCNT_ATTACH_STATIC(entropy_partial_evcnt);
  225 static struct evcnt entropy_consolidate_evcnt =
  226     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "consolidate");
  227 EVCNT_ATTACH_STATIC(entropy_consolidate_evcnt);
  228 static struct evcnt entropy_extract_fail_evcnt =
  229     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "extract fail");
  230 EVCNT_ATTACH_STATIC(entropy_extract_fail_evcnt);
  231 static struct evcnt entropy_request_evcnt =
  232     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "request");
  233 EVCNT_ATTACH_STATIC(entropy_request_evcnt);
  234 static struct evcnt entropy_deplete_evcnt =
  235     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "deplete");
  236 EVCNT_ATTACH_STATIC(entropy_deplete_evcnt);
  237 static struct evcnt entropy_notify_evcnt =
  238     EVCNT_INITIALIZER(EVCNT_TYPE_MISC, NULL, "entropy", "notify");
  239 EVCNT_ATTACH_STATIC(entropy_notify_evcnt);
  240 
  241 /* Sysctl knobs */
  242 static bool     entropy_collection = 1;
  243 static bool     entropy_depletion = 0; /* Silly!  */
  244 
  245 static const struct sysctlnode  *entropy_sysctlroot;
  246 static struct sysctllog         *entropy_sysctllog;
  247 
  248 /* Forward declarations */
  249 static void     entropy_init_cpu(void *, void *, struct cpu_info *);
  250 static void     entropy_fini_cpu(void *, void *, struct cpu_info *);
  251 static void     entropy_account_cpu(struct entropy_cpu *);
  252 static void     entropy_enter(const void *, size_t, unsigned);
  253 static bool     entropy_enter_intr(const void *, size_t, unsigned);
  254 static void     entropy_softintr(void *);
  255 static void     entropy_thread(void *);
  256 static uint32_t entropy_pending(void);
  257 static void     entropy_pending_cpu(void *, void *, struct cpu_info *);
  258 static void     entropy_do_consolidate(void);
  259 static void     entropy_consolidate_xc(void *, void *);
  260 static void     entropy_notify(void);
  261 static int      sysctl_entropy_consolidate(SYSCTLFN_ARGS);
  262 static int      sysctl_entropy_gather(SYSCTLFN_ARGS);
  263 static void     filt_entropy_read_detach(struct knote *);
  264 static int      filt_entropy_read_event(struct knote *, long);
  265 static int      entropy_request(size_t, int);
  266 static void     rnd_add_data_1(struct krndsource *, const void *, uint32_t,
  267                     uint32_t, uint32_t);
  268 static unsigned rndsource_entropybits(struct krndsource *);
  269 static void     rndsource_entropybits_cpu(void *, void *, struct cpu_info *);
  270 static void     rndsource_to_user(struct krndsource *, rndsource_t *);
  271 static void     rndsource_to_user_est(struct krndsource *, rndsource_est_t *);
  272 static void     rndsource_to_user_est_cpu(void *, void *, struct cpu_info *);
  273 
  274 /*
  275  * entropy_timer()
  276  *
  277  *      Cycle counter, time counter, or anything that changes a wee bit
  278  *      unpredictably.
  279  */
  280 static inline uint32_t
  281 entropy_timer(void)
  282 {
  283         struct bintime bt;
  284         uint32_t v;
  285 
  286         /* If we have a CPU cycle counter, use the low 32 bits.  */
  287 #ifdef __HAVE_CPU_COUNTER
  288         if (__predict_true(cpu_hascounter()))
  289                 return cpu_counter32();
  290 #endif  /* __HAVE_CPU_COUNTER */
  291 
  292         /* If we're cold, tough.  Can't binuptime while cold.  */
  293         if (__predict_false(cold))
  294                 return 0;
  295 
  296         /* Fold the 128 bits of binuptime into 32 bits.  */
  297         binuptime(&bt);
  298         v = bt.frac;
  299         v ^= bt.frac >> 32;
  300         v ^= bt.sec;
  301         v ^= bt.sec >> 32;
  302         return v;
  303 }
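/*
 * That is, the fold XORs the low and high halves of the 64-bit
 * fraction with the low and high halves of the 64-bit seconds count,
 * so a change in any of the 128 bits of the bintime perturbs the
 * 32-bit result.
 */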
  304 
  305 static void
  306 attach_seed_rndsource(void)
  307 {
  308 
  309         /*
  310          * First called no later than entropy_init, while we are still
  311          * single-threaded, so no need for RUN_ONCE.
  312          */
  313         if (E->stage >= ENTROPY_WARM || E->seed_rndsource)
  314                 return;
  315         rnd_attach_source(&seed_rndsource, "seed", RND_TYPE_UNKNOWN,
  316             RND_FLAG_COLLECT_VALUE);
  317         E->seed_rndsource = true;
  318 }
  319 
  320 /*
  321  * entropy_init()
  322  *
  323  *      Initialize the entropy subsystem.  Panic on failure.
  324  *
  325  *      Requires percpu(9) and sysctl(9) to be initialized.
  326  */
  327 static void
  328 entropy_init(void)
  329 {
  330         uint32_t extra[2];
  331         struct krndsource *rs;
  332         unsigned i = 0;
  333 
  334         KASSERT(E->stage == ENTROPY_COLD);
  335 
  336         /* Grab some cycle counts early at boot.  */
  337         extra[i++] = entropy_timer();
  338 
  339         /* Run the entropy pool cryptography self-test.  */
  340         if (entpool_selftest() == -1)
  341                 panic("entropy pool crypto self-test failed");
  342 
  343         /* Create the sysctl directory.  */
  344         sysctl_createv(&entropy_sysctllog, 0, NULL, &entropy_sysctlroot,
  345             CTLFLAG_PERMANENT, CTLTYPE_NODE, "entropy",
  346             SYSCTL_DESCR("Entropy (random number sources) options"),
  347             NULL, 0, NULL, 0,
  348             CTL_KERN, CTL_CREATE, CTL_EOL);
  349 
  350         /* Create the sysctl knobs.  */
  351         /* XXX These shouldn't be writable at securelevel>0.  */
  352         sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
  353             CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "collection",
  354             SYSCTL_DESCR("Automatically collect entropy from hardware"),
  355             NULL, 0, &entropy_collection, 0, CTL_CREATE, CTL_EOL);
  356         sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
  357             CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_BOOL, "depletion",
  358             SYSCTL_DESCR("`Deplete' entropy pool when observed"),
  359             NULL, 0, &entropy_depletion, 0, CTL_CREATE, CTL_EOL);
  360         sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
  361             CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "consolidate",
  362             SYSCTL_DESCR("Trigger entropy consolidation now"),
  363             sysctl_entropy_consolidate, 0, NULL, 0, CTL_CREATE, CTL_EOL);
  364         sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
  365             CTLFLAG_PERMANENT|CTLFLAG_READWRITE, CTLTYPE_INT, "gather",
  366             SYSCTL_DESCR("Trigger entropy gathering from sources now"),
  367             sysctl_entropy_gather, 0, NULL, 0, CTL_CREATE, CTL_EOL);
  368         /* XXX These should maybe not be readable at securelevel>0.  */
  369         sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
  370             CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
  371             "needed", SYSCTL_DESCR("Systemwide entropy deficit"),
  372             NULL, 0, &E->needed, 0, CTL_CREATE, CTL_EOL);
  373         sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
  374             CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
  375             "pending", SYSCTL_DESCR("Entropy pending on CPUs"),
  376             NULL, 0, &E->pending, 0, CTL_CREATE, CTL_EOL);
  377         sysctl_createv(&entropy_sysctllog, 0, &entropy_sysctlroot, NULL,
  378             CTLFLAG_PERMANENT|CTLFLAG_READONLY|CTLFLAG_PRIVATE, CTLTYPE_INT,
  379             "epoch", SYSCTL_DESCR("Entropy epoch"),
  380             NULL, 0, &E->epoch, 0, CTL_CREATE, CTL_EOL);
  381 
  382         /* Initialize the global state for multithreaded operation.  */
  383         mutex_init(&E->lock, MUTEX_DEFAULT, IPL_SOFTSERIAL);
  384         cv_init(&E->cv, "entropy");
  385         selinit(&E->selq);
  386         cv_init(&E->sourcelock_cv, "entsrclock");
  387 
  388         /* Make sure the seed source is attached.  */
  389         attach_seed_rndsource();
  390 
  391         /* Note if the bootloader didn't provide a seed.  */
  392         if (!E->seeded)
  393                 aprint_debug("entropy: no seed from bootloader\n");
  394 
  395         /* Allocate the per-CPU records for all early entropy sources.  */
  396         LIST_FOREACH(rs, &E->sources, list)
  397                 rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
  398 
  399         /* Allocate and initialize the per-CPU state.  */
  400         entropy_percpu = percpu_create(sizeof(struct entropy_cpu),
  401             entropy_init_cpu, entropy_fini_cpu, NULL);
  402 
  403         /* Enter the boot cycle count to get started.  */
  404         extra[i++] = entropy_timer();
  405         KASSERT(i == __arraycount(extra));
  406         entropy_enter(extra, sizeof extra, 0);
  407         explicit_memset(extra, 0, sizeof extra);
  408 
  409         /* We are now ready for multi-threaded operation.  */
  410         E->stage = ENTROPY_WARM;
  411 }
  412 
  413 static void
  414 entropy_init_late_cpu(void *a, void *b)
  415 {
  416         int bound;
  417 
  418         /*
  419          * We're not necessarily in a softint lwp here (xc_broadcast
  420          * triggers softint on other CPUs, but calls directly on this
  421          * CPU), so explicitly bind to the current CPU to invoke the
  422          * softintr -- this lets us have a simpler assertion in
  423          * entropy_account_cpu.  Not necessary to avoid migration
  424          * because xc_broadcast disables kpreemption anyway, but it
  425          * doesn't hurt.
  426          */
  427         bound = curlwp_bind();
  428         entropy_softintr(NULL);
  429         curlwp_bindx(bound);
  430 }
  431 
  432 /*
  433  * entropy_init_late()
  434  *
  435  *      Late initialization.  Panic on failure.
  436  *
  437  *      Requires CPUs to have been detected and LWPs to have started.
  438  */
  439 static void
  440 entropy_init_late(void)
  441 {
  442         void *sih;
  443         int error;
  444 
  445         KASSERT(E->stage == ENTROPY_WARM);
  446 
  447         /*
  448          * Establish the softint at the highest softint priority level.
  449          * Must happen after CPU detection.
  450          */
  451         sih = softint_establish(SOFTINT_SERIAL|SOFTINT_MPSAFE,
  452             &entropy_softintr, NULL);
  453         if (sih == NULL)
  454                 panic("unable to establish entropy softint");
  455 
  456         /*
  457          * Create the entropy housekeeping thread.  Must happen after
  458          * lwpinit.
  459          */
  460         error = kthread_create(PRI_NONE, KTHREAD_MPSAFE|KTHREAD_TS, NULL,
  461             entropy_thread, NULL, &entropy_lwp, "entbutler");
  462         if (error)
  463                 panic("unable to create entropy housekeeping thread: %d",
  464                     error);
  465 
  466         /*
  467          * Wait until the per-CPU initialization has hit all CPUs
  468          * before proceeding to mark the entropy system hot and
  469          * enabling use of the softint.
  470          */
  471         xc_barrier(XC_HIGHPRI);
  472         E->stage = ENTROPY_HOT;
  473         atomic_store_relaxed(&entropy_sih, sih);
  474 
  475         /*
  476          * At this point, entering new samples from interrupt handlers
  477          * will trigger the softint to process them.  But there may be
  478          * some samples that were entered from interrupt handlers
  479          * before the softint was available.  Make sure we process
  480          * those samples on all CPUs by running the softint logic on
  481          * all CPUs.
  482          */
  483         xc_wait(xc_broadcast(XC_HIGHPRI, entropy_init_late_cpu, NULL, NULL));
  484 }
  485 
  486 /*
  487  * entropy_init_cpu(ptr, cookie, ci)
  488  *
  489  *      percpu(9) constructor for per-CPU entropy pool.
  490  */
  491 static void
  492 entropy_init_cpu(void *ptr, void *cookie, struct cpu_info *ci)
  493 {
  494         struct entropy_cpu *ec = ptr;
  495         const char *cpuname;
  496 
  497         ec->ec_evcnt = kmem_alloc(sizeof(*ec->ec_evcnt), KM_SLEEP);
  498         ec->ec_pool = kmem_zalloc(sizeof(*ec->ec_pool), KM_SLEEP);
  499         ec->ec_pending = 0;
  500         ec->ec_locked = false;
  501 
  502         /* XXX ci_cpuname may not be initialized early enough.  */
  503         cpuname = ci->ci_cpuname[0] == '\0' ? "cpu0" : ci->ci_cpuname;
  504         evcnt_attach_dynamic(&ec->ec_evcnt->softint, EVCNT_TYPE_MISC, NULL,
  505             cpuname, "entropy softint");
  506         evcnt_attach_dynamic(&ec->ec_evcnt->intrdrop, EVCNT_TYPE_MISC, NULL,
  507             cpuname, "entropy intrdrop");
  508         evcnt_attach_dynamic(&ec->ec_evcnt->intrtrunc, EVCNT_TYPE_MISC, NULL,
  509             cpuname, "entropy intrtrunc");
  510 }
  511 
  512 /*
  513  * entropy_fini_cpu(ptr, cookie, ci)
  514  *
  515  *      percpu(9) destructor for per-CPU entropy pool.
  516  */
  517 static void
  518 entropy_fini_cpu(void *ptr, void *cookie, struct cpu_info *ci)
  519 {
  520         struct entropy_cpu *ec = ptr;
  521 
  522         /*
  523          * Zero any lingering data.  Disclosure of the per-CPU pool
  524          * shouldn't retroactively affect the security of any keys
  525          * generated, because entpool(9) erases whatever we have just
  526          * drawn out of any pool, but better safe than sorry.
  527          */
  528         explicit_memset(ec->ec_pool, 0, sizeof(*ec->ec_pool));
  529 
  530         evcnt_detach(&ec->ec_evcnt->intrtrunc);
  531         evcnt_detach(&ec->ec_evcnt->intrdrop);
  532         evcnt_detach(&ec->ec_evcnt->softint);
  533 
  534         kmem_free(ec->ec_pool, sizeof(*ec->ec_pool));
  535         kmem_free(ec->ec_evcnt, sizeof(*ec->ec_evcnt));
  536 }
  537 
  538 /*
  539  * ec = entropy_cpu_get(&lock)
  540  * entropy_cpu_put(&lock, ec)
  541  *
  542  *      Lock and unlock the per-CPU entropy state.  This only prevents
  543  *      access on the same CPU -- by hard interrupts, by soft
  544  *      interrupts, or by other threads.
  545  *
  546  *      Blocks soft interrupts and preemption altogether; doesn't block
  547  *      hard interrupts, but causes samples in hard interrupts to be
  548  *      dropped.
  549  */
  550 static struct entropy_cpu *
  551 entropy_cpu_get(struct entropy_cpu_lock *lock)
  552 {
  553         struct entropy_cpu *ec;
  554 
  555         ec = percpu_getref(entropy_percpu);
  556         lock->ecl_s = splsoftserial();
  557         KASSERT(!ec->ec_locked);
  558         ec->ec_locked = true;
  559         lock->ecl_ncsw = curlwp->l_ncsw;
  560         __insn_barrier();
  561 
  562         return ec;
  563 }
  564 
  565 static void
  566 entropy_cpu_put(struct entropy_cpu_lock *lock, struct entropy_cpu *ec)
  567 {
  568 
  569         KASSERT(ec == percpu_getptr_remote(entropy_percpu, curcpu()));
  570         KASSERT(ec->ec_locked);
  571 
  572         __insn_barrier();
  573         KASSERT(lock->ecl_ncsw == curlwp->l_ncsw);
  574         ec->ec_locked = false;
  575         splx(lock->ecl_s);
  576         percpu_putref(entropy_percpu);
  577 }
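/*
 * Typical use, as in entropy_enter and entropy_softintr below (a
 * sketch of the pattern, not a new interface):
 *
 *              struct entropy_cpu_lock lock;
 *              struct entropy_cpu *ec;
 *
 *              ec = entropy_cpu_get(&lock);
 *              ... touch ec->ec_pool and ec->ec_pending ...
 *              entropy_cpu_put(&lock, ec);
 *
 * The caller must not sleep or switch CPUs in between; the ecl_ncsw
 * check in entropy_cpu_put asserts that no context switch occurred.
 */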
  578 
  579 /*
  580  * entropy_seed(seed)
  581  *
  582  *      Seed the entropy pool with seed.  Meant to be called as early
  583  *      as possible by the bootloader; may be called before or after
  584  *      entropy_init.  Must be called before system reaches userland.
  585  *      Must be called in thread or soft interrupt context, not in hard
  586  *      interrupt context.  Must be called at most once.
  587  *
  588  *      Overwrites the seed in place.  Caller may then free the memory.
  589  */
  590 static void
  591 entropy_seed(rndsave_t *seed)
  592 {
  593         SHA1_CTX ctx;
  594         uint8_t digest[SHA1_DIGEST_LENGTH];
  595         bool seeded;
  596 
  597         /*
  598          * Verify the checksum.  If the checksum fails, take the data
  599          * but ignore the entropy estimate -- the file may have been
  600          * incompletely written with garbage, which is harmless to add
  601          * but may not be as unpredictable as alleged.
  602          */
  603         SHA1Init(&ctx);
  604         SHA1Update(&ctx, (const void *)&seed->entropy, sizeof(seed->entropy));
  605         SHA1Update(&ctx, seed->data, sizeof(seed->data));
  606         SHA1Final(digest, &ctx);
  607         CTASSERT(sizeof(seed->digest) == sizeof(digest));
  608         if (!consttime_memequal(digest, seed->digest, sizeof(digest))) {
  609                 printf("entropy: invalid seed checksum\n");
  610                 seed->entropy = 0;
  611         }
  612         explicit_memset(&ctx, 0, sizeof ctx);
  613         explicit_memset(digest, 0, sizeof digest);
  614 
  615         /*
  616          * If the entropy is insensibly large, try byte-swapping.
  617          * Otherwise assume the file is corrupted and act as though it
  618          * has zero entropy.
  619          */
  620         if (howmany(seed->entropy, NBBY) > sizeof(seed->data)) {
  621                 seed->entropy = bswap32(seed->entropy);
  622                 if (howmany(seed->entropy, NBBY) > sizeof(seed->data))
  623                         seed->entropy = 0;
  624         }
  625 
  626         /* Make sure the seed source is attached.  */
  627         attach_seed_rndsource();
  628 
  629         /* Test and set E->seeded.  */
  630         if (E->stage >= ENTROPY_WARM)
  631                 mutex_enter(&E->lock);
  632         seeded = E->seeded;
  633         E->seeded = (seed->entropy > 0);
  634         if (E->stage >= ENTROPY_WARM)
  635                 mutex_exit(&E->lock);
  636 
  637         /*
   638          * If we've been seeded, we may be re-entering the same seed
  639          * (e.g., bootloader vs module init, or something).  No harm in
  640          * entering it twice, but it contributes no additional entropy.
  641          */
  642         if (seeded) {
  643                 printf("entropy: double-seeded by bootloader\n");
  644                 seed->entropy = 0;
  645         } else {
  646                 printf("entropy: entering seed from bootloader"
  647                     " with %u bits of entropy\n", (unsigned)seed->entropy);
  648         }
  649 
  650         /* Enter it into the pool and promptly zero it.  */
  651         rnd_add_data(&seed_rndsource, seed->data, sizeof(seed->data),
  652             seed->entropy);
  653         explicit_memset(seed, 0, sizeof(*seed));
  654 }
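/*
 * For reference, the checksum verified above is a plain SHA-1 over
 * the entropy estimate followed by the seed data, so a seed-writing
 * tool computes it as follows (a sketch mirroring the verification
 * code in entropy_seed):
 *
 *              SHA1_CTX ctx;
 *
 *              SHA1Init(&ctx);
 *              SHA1Update(&ctx, (const void *)&seed->entropy,
 *                  sizeof(seed->entropy));
 *              SHA1Update(&ctx, seed->data, sizeof(seed->data));
 *              SHA1Final(seed->digest, &ctx);
 */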
  655 
  656 /*
  657  * entropy_bootrequest()
  658  *
  659  *      Request entropy from all sources at boot, once config is
  660  *      complete and interrupts are running.
  661  */
  662 void
  663 entropy_bootrequest(void)
  664 {
  665         int error;
  666 
  667         KASSERT(E->stage >= ENTROPY_WARM);
  668 
  669         /*
  670          * Request enough to satisfy the maximum entropy shortage.
  671          * This is harmless overkill if the bootloader provided a seed.
  672          */
  673         mutex_enter(&E->lock);
  674         error = entropy_request(ENTROPY_CAPACITY, ENTROPY_WAIT);
  675         KASSERT(error == 0);
  676         mutex_exit(&E->lock);
  677 }
  678 
  679 /*
  680  * entropy_epoch()
  681  *
  682  *      Returns the current entropy epoch.  If this changes, you should
   683  *      reseed.  A value of -1 means the system has not yet reached
   684  *      full entropy or been explicitly consolidated; the epoch never
   685  *      reverts to -1.  It is never zero, so you can always use zero
   686  *      as an uninitialized sentinel value meaning `reseed ASAP'.
  687  *
  688  *      Usage model:
  689  *
  690  *              struct foo {
  691  *                      struct crypto_prng prng;
  692  *                      unsigned epoch;
  693  *              } *foo;
  694  *
  695  *              unsigned epoch = entropy_epoch();
  696  *              if (__predict_false(epoch != foo->epoch)) {
  697  *                      uint8_t seed[32];
  698  *                      if (entropy_extract(seed, sizeof seed, 0) != 0)
  699  *                              warn("no entropy");
  700  *                      crypto_prng_reseed(&foo->prng, seed, sizeof seed);
  701  *                      foo->epoch = epoch;
  702  *              }
  703  */
  704 unsigned
  705 entropy_epoch(void)
  706 {
  707 
  708         /*
  709          * Unsigned int, so no need for seqlock for an atomic read, but
  710          * make sure we read it afresh each time.
  711          */
  712         return atomic_load_relaxed(&E->epoch);
  713 }
  714 
  715 /*
  716  * entropy_ready()
  717  *
  718  *      True if the entropy pool has full entropy.
  719  */
  720 bool
  721 entropy_ready(void)
  722 {
  723 
  724         return atomic_load_relaxed(&E->needed) == 0;
  725 }
  726 
  727 /*
  728  * entropy_account_cpu(ec)
  729  *
  730  *      Consider whether to consolidate entropy into the global pool
  731  *      after we just added some into the current CPU's pending pool.
  732  *
  733  *      - If this CPU can provide enough entropy now, do so.
  734  *
  735  *      - If this and whatever else is available on other CPUs can
  736  *        provide enough entropy, kick the consolidation thread.
  737  *
  738  *      - Otherwise, do as little as possible, except maybe consolidate
  739  *        entropy at most once a minute.
  740  *
  741  *      Caller must be bound to a CPU and therefore have exclusive
  742  *      access to ec.  Will acquire and release the global lock.
  743  */
  744 static void
  745 entropy_account_cpu(struct entropy_cpu *ec)
  746 {
  747         struct entropy_cpu_lock lock;
  748         struct entropy_cpu *ec0;
  749         unsigned diff;
  750 
  751         KASSERT(E->stage >= ENTROPY_WARM);
  752         KASSERT(curlwp->l_pflag & LP_BOUND);
  753 
  754         /*
  755          * If there's no entropy needed, and entropy has been
  756          * consolidated in the last minute, do nothing.
  757          */
  758         if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
  759             __predict_true(!atomic_load_relaxed(&entropy_depletion)) &&
  760             __predict_true((time_uptime - E->timestamp) <= 60))
  761                 return;
  762 
  763         /*
  764          * Consider consolidation, under the global lock and with the
  765          * per-CPU state locked.
  766          */
  767         mutex_enter(&E->lock);
  768         ec0 = entropy_cpu_get(&lock);
  769         KASSERT(ec0 == ec);
  770         if (ec->ec_pending == 0) {
  771                 /* Raced with consolidation xcall.  Nothing to do.  */
  772         } else if (E->needed != 0 && E->needed <= ec->ec_pending) {
  773                 /*
  774                  * If we have not yet attained full entropy but we can
  775                  * now, do so.  This way we disseminate entropy
  776                  * promptly when it becomes available early at boot;
  777                  * otherwise we leave it to the entropy consolidation
  778                  * thread, which is rate-limited to mitigate side
  779                  * channels and abuse.
  780                  */
  781                 uint8_t buf[ENTPOOL_CAPACITY];
  782 
  783                 /* Transfer from the local pool to the global pool.  */
  784                 entpool_extract(ec->ec_pool, buf, sizeof buf);
  785                 entpool_enter(&E->pool, buf, sizeof buf);
  786                 atomic_store_relaxed(&ec->ec_pending, 0);
  787                 atomic_store_relaxed(&E->needed, 0);
  788 
  789                 /* Notify waiters that we now have full entropy.  */
  790                 entropy_notify();
  791                 entropy_immediate_evcnt.ev_count++;
  792         } else {
  793                 /* Determine how much we can add to the global pool.  */
  794                 KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY,
  795                     "E->pending=%u", E->pending);
  796                 diff = MIN(ec->ec_pending, ENTROPY_CAPACITY*NBBY - E->pending);
  797 
  798                 /*
  799                  * This should make a difference unless we are already
  800                  * saturated.
  801                  */
  802                 KASSERTMSG(diff || E->pending == ENTROPY_CAPACITY*NBBY,
  803                     "diff=%u E->pending=%u ec->ec_pending=%u cap=%u",
  804                     diff, E->pending, ec->ec_pending,
  805                     (unsigned)ENTROPY_CAPACITY*NBBY);
  806 
  807                 /* Add to the global, subtract from the local.  */
  808                 E->pending += diff;
  809                 KASSERT(E->pending);
  810                 KASSERTMSG(E->pending <= ENTROPY_CAPACITY*NBBY,
  811                     "E->pending=%u", E->pending);
  812                 atomic_store_relaxed(&ec->ec_pending, ec->ec_pending - diff);
  813 
  814                 if (E->needed <= E->pending) {
  815                         /*
  816                          * Enough entropy between all the per-CPU
  817                          * pools.  Wake up the housekeeping thread.
  818                          *
  819                          * If we don't need any entropy, this doesn't
  820                          * mean much, but it is the only time we ever
  821                          * gather additional entropy in case the
  822                          * accounting has been overly optimistic.  This
  823                          * happens at most once a minute, so there's
  824                          * negligible performance cost.
  825                          */
  826                         E->consolidate = true;
  827                         cv_broadcast(&E->cv);
  828                         if (E->needed == 0)
  829                                 entropy_discretionary_evcnt.ev_count++;
  830                 } else {
  831                         /* Can't get full entropy.  Keep gathering.  */
  832                         entropy_partial_evcnt.ev_count++;
  833                 }
  834         }
  835         entropy_cpu_put(&lock, ec);
  836         mutex_exit(&E->lock);
  837 }
  838 
  839 /*
  840  * entropy_enter_early(buf, len, nbits)
  841  *
  842  *      Do entropy bookkeeping globally, before we have established
  843  *      per-CPU pools.  Enter directly into the global pool in the hope
  844  *      that we enter enough before the first entropy_extract to thwart
  845  *      iterative-guessing attacks; entropy_extract will warn if not.
  846  */
  847 static void
  848 entropy_enter_early(const void *buf, size_t len, unsigned nbits)
  849 {
  850         bool notify = false;
  851 
  852         KASSERT(E->stage == ENTROPY_COLD);
  853 
  854         /* Enter it into the pool.  */
  855         entpool_enter(&E->pool, buf, len);
  856 
  857         /*
  858          * Decide whether to notify reseed -- we will do so if either:
  859          * (a) we transition from partial entropy to full entropy, or
  860          * (b) we get a batch of full entropy all at once.
  861          */
  862         notify |= (E->needed && E->needed <= nbits);
  863         notify |= (nbits >= ENTROPY_CAPACITY*NBBY);
  864 
  865         /* Subtract from the needed count and notify if appropriate.  */
  866         E->needed -= MIN(E->needed, nbits);
  867         if (notify) {
  868                 entropy_notify();
  869                 entropy_immediate_evcnt.ev_count++;
  870         }
  871 }
  872 
  873 /*
  874  * entropy_enter(buf, len, nbits)
  875  *
  876  *      Enter len bytes of data from buf into the system's entropy
  877  *      pool, stirring as necessary when the internal buffer fills up.
  878  *      nbits is a lower bound on the number of bits of entropy in the
  879  *      process that led to this sample.
  880  */
  881 static void
  882 entropy_enter(const void *buf, size_t len, unsigned nbits)
  883 {
  884         struct entropy_cpu_lock lock;
  885         struct entropy_cpu *ec;
  886         unsigned pending;
  887         int bound;
  888 
  889         KASSERTMSG(!cpu_intr_p(),
  890             "use entropy_enter_intr from interrupt context");
  891         KASSERTMSG(howmany(nbits, NBBY) <= len,
  892             "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
  893 
  894         /* If it's too early after boot, just use entropy_enter_early.  */
  895         if (__predict_false(E->stage == ENTROPY_COLD)) {
  896                 entropy_enter_early(buf, len, nbits);
  897                 return;
  898         }
  899 
  900         /*
  901          * Bind ourselves to the current CPU so we don't switch CPUs
  902          * between entering data into the current CPU's pool (and
  903          * updating the pending count) and transferring it to the
  904          * global pool in entropy_account_cpu.
  905          */
  906         bound = curlwp_bind();
  907 
  908         /*
  909          * With the per-CPU state locked, enter into the per-CPU pool
  910          * and count up what we can add.
  911          */
  912         ec = entropy_cpu_get(&lock);
  913         entpool_enter(ec->ec_pool, buf, len);
  914         pending = ec->ec_pending;
  915         pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
  916         atomic_store_relaxed(&ec->ec_pending, pending);
  917         entropy_cpu_put(&lock, ec);
  918 
  919         /* Consolidate globally if appropriate based on what we added.  */
  920         if (pending)
  921                 entropy_account_cpu(ec);
  922 
  923         curlwp_bindx(bound);
  924 }
  925 
  926 /*
  927  * entropy_enter_intr(buf, len, nbits)
  928  *
  929  *      Enter up to len bytes of data from buf into the system's
  930  *      entropy pool without stirring.  nbits is a lower bound on the
  931  *      number of bits of entropy in the process that led to this
  932  *      sample.  If the sample could be entered completely, assume
  933  *      nbits of entropy pending; otherwise assume none, since we don't
  934  *      know whether some parts of the sample are constant, for
  935  *      instance.  Schedule a softint to stir the entropy pool if
  936  *      needed.  Return true if used fully, false if truncated at all.
  937  *
  938  *      Using this in thread context will work, but you might as well
  939  *      use entropy_enter in that case.
  940  */
  941 static bool
  942 entropy_enter_intr(const void *buf, size_t len, unsigned nbits)
  943 {
  944         struct entropy_cpu *ec;
  945         bool fullyused = false;
  946         uint32_t pending;
  947         void *sih;
  948 
  949         KASSERT(cpu_intr_p());
  950         KASSERTMSG(howmany(nbits, NBBY) <= len,
  951             "impossible entropy rate: %u bits in %zu-byte string", nbits, len);
  952 
  953         /* If it's too early after boot, just use entropy_enter_early.  */
  954         if (__predict_false(E->stage == ENTROPY_COLD)) {
  955                 entropy_enter_early(buf, len, nbits);
  956                 return true;
  957         }
  958 
  959         /*
  960          * Acquire the per-CPU state.  If someone is in the middle of
  961          * using it, drop the sample.  Otherwise, take the lock so that
  962          * higher-priority interrupts will drop their samples.
  963          */
  964         ec = percpu_getref(entropy_percpu);
  965         if (ec->ec_locked) {
  966                 ec->ec_evcnt->intrdrop.ev_count++;
  967                 goto out0;
  968         }
  969         ec->ec_locked = true;
  970         __insn_barrier();
  971 
  972         /*
  973          * Enter as much as we can into the per-CPU pool.  If it was
  974          * truncated, schedule a softint to stir the pool and stop.
  975          */
  976         if (!entpool_enter_nostir(ec->ec_pool, buf, len)) {
  977                 sih = atomic_load_relaxed(&entropy_sih);
  978                 if (__predict_true(sih != NULL))
  979                         softint_schedule(sih);
  980                 ec->ec_evcnt->intrtrunc.ev_count++;
  981                 goto out1;
  982         }
  983         fullyused = true;
  984 
  985         /* Count up what we can contribute.  */
  986         pending = ec->ec_pending;
  987         pending += MIN(ENTROPY_CAPACITY*NBBY - pending, nbits);
  988         atomic_store_relaxed(&ec->ec_pending, pending);
  989 
  990         /* Schedule a softint if we added anything and it matters.  */
  991         if (__predict_false((atomic_load_relaxed(&E->needed) != 0) ||
  992                 atomic_load_relaxed(&entropy_depletion)) &&
  993             nbits != 0) {
  994                 sih = atomic_load_relaxed(&entropy_sih);
  995                 if (__predict_true(sih != NULL))
  996                         softint_schedule(sih);
  997         }
  998 
  999 out1:   /* Release the per-CPU state.  */
 1000         KASSERT(ec->ec_locked);
 1001         __insn_barrier();
 1002         ec->ec_locked = false;
 1003 out0:   percpu_putref(entropy_percpu);
 1004 
 1005         return fullyused;
 1006 }
 1007 
 1008 /*
 1009  * entropy_softintr(cookie)
 1010  *
 1011  *      Soft interrupt handler for entering entropy.  Takes care of
 1012  *      stirring the local CPU's entropy pool if it filled up during
 1013  *      hard interrupts, and promptly crediting entropy from the local
 1014  *      CPU's entropy pool to the global entropy pool if needed.
 1015  */
 1016 static void
 1017 entropy_softintr(void *cookie)
 1018 {
 1019         struct entropy_cpu_lock lock;
 1020         struct entropy_cpu *ec;
 1021         unsigned pending;
 1022 
 1023         /*
 1024          * With the per-CPU state locked, stir the pool if necessary
 1025          * and determine if there's any pending entropy on this CPU to
 1026          * account globally.
 1027          */
 1028         ec = entropy_cpu_get(&lock);
 1029         ec->ec_evcnt->softint.ev_count++;
 1030         entpool_stir(ec->ec_pool);
 1031         pending = ec->ec_pending;
 1032         entropy_cpu_put(&lock, ec);
 1033 
 1034         /* Consolidate globally if appropriate based on what we added.  */
 1035         if (pending)
 1036                 entropy_account_cpu(ec);
 1037 }
 1038 
 1039 /*
 1040  * entropy_thread(cookie)
 1041  *
 1042  *      Handle any asynchronous entropy housekeeping.
 1043  */
 1044 static void
 1045 entropy_thread(void *cookie)
 1046 {
 1047         bool consolidate;
 1048 
 1049         for (;;) {
 1050                 /*
 1051                  * Wait until there's full entropy somewhere among the
 1052                  * CPUs, as confirmed at most once per minute, or
 1053                  * someone wants to consolidate.
 1054                  */
 1055                 if (entropy_pending() >= ENTROPY_CAPACITY*NBBY) {
 1056                         consolidate = true;
 1057                 } else {
 1058                         mutex_enter(&E->lock);
 1059                         if (!E->consolidate)
 1060                                 cv_timedwait(&E->cv, &E->lock, 60*hz);
 1061                         consolidate = E->consolidate;
 1062                         E->consolidate = false;
 1063                         mutex_exit(&E->lock);
 1064                 }
 1065 
 1066                 if (consolidate) {
 1067                         /* Do it.  */
 1068                         entropy_do_consolidate();
 1069 
 1070                         /* Mitigate abuse.  */
 1071                         kpause("entropy", false, hz, NULL);
 1072                 }
 1073         }
 1074 }
 1075 
 1076 /*
 1077  * entropy_pending()
 1078  *
 1079  *      Count up the amount of entropy pending on other CPUs.
 1080  */
 1081 static uint32_t
 1082 entropy_pending(void)
 1083 {
 1084         uint32_t pending = 0;
 1085 
 1086         percpu_foreach(entropy_percpu, &entropy_pending_cpu, &pending);
 1087         return pending;
 1088 }
 1089 
 1090 static void
 1091 entropy_pending_cpu(void *ptr, void *cookie, struct cpu_info *ci)
 1092 {
 1093         struct entropy_cpu *ec = ptr;
 1094         uint32_t *pendingp = cookie;
 1095         uint32_t cpu_pending;
 1096 
 1097         cpu_pending = atomic_load_relaxed(&ec->ec_pending);
 1098         *pendingp += MIN(ENTROPY_CAPACITY*NBBY - *pendingp, cpu_pending);
 1099 }
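/*
 * The sum saturates at ENTROPY_CAPACITY*NBBY bits.  For example, with
 * a 256-bit capacity, if 200 bits have been counted so far and this
 * CPU has 100 bits pending, only MIN(256 - 200, 100) = 56 more are
 * added, so the total never exceeds what the global pool can absorb.
 */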
 1100 
 1101 /*
 1102  * entropy_do_consolidate()
 1103  *
 1104  *      Issue a cross-call to gather entropy on all CPUs and advance
 1105  *      the entropy epoch.
 1106  */
 1107 static void
 1108 entropy_do_consolidate(void)
 1109 {
 1110         static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
 1111         static struct timeval lasttime; /* serialized by E->lock */
 1112         struct entpool pool;
 1113         uint8_t buf[ENTPOOL_CAPACITY];
 1114         unsigned diff;
 1115         uint64_t ticket;
 1116 
 1117         /* Gather entropy on all CPUs into a temporary pool.  */
 1118         memset(&pool, 0, sizeof pool);
 1119         ticket = xc_broadcast(0, &entropy_consolidate_xc, &pool, NULL);
 1120         xc_wait(ticket);
 1121 
 1122         /* Acquire the lock to notify waiters.  */
 1123         mutex_enter(&E->lock);
 1124 
 1125         /* Count another consolidation.  */
 1126         entropy_consolidate_evcnt.ev_count++;
 1127 
 1128         /* Note when we last consolidated, i.e. now.  */
 1129         E->timestamp = time_uptime;
 1130 
 1131         /* Mix what we gathered into the global pool.  */
 1132         entpool_extract(&pool, buf, sizeof buf);
 1133         entpool_enter(&E->pool, buf, sizeof buf);
 1134         explicit_memset(&pool, 0, sizeof pool);
 1135 
 1136         /* Count the entropy that was gathered.  */
 1137         diff = MIN(E->needed, E->pending);
 1138         atomic_store_relaxed(&E->needed, E->needed - diff);
 1139         E->pending -= diff;
 1140         if (__predict_false(E->needed > 0)) {
 1141                 if ((boothowto & AB_DEBUG) != 0 &&
 1142                     ratecheck(&lasttime, &interval)) {
 1143                         printf("WARNING:"
 1144                             " consolidating less than full entropy\n");
 1145                 }
 1146         }
 1147 
 1148         /* Advance the epoch and notify waiters.  */
 1149         entropy_notify();
 1150 
 1151         /* Release the lock.  */
 1152         mutex_exit(&E->lock);
 1153 }
 1154 
 1155 /*
 1156  * entropy_consolidate_xc(vpool, arg2)
 1157  *
 1158  *      Extract output from the local CPU's input pool and enter it
 1159  *      into a temporary pool passed as vpool.
 1160  */
 1161 static void
 1162 entropy_consolidate_xc(void *vpool, void *arg2 __unused)
 1163 {
 1164         struct entpool *pool = vpool;
 1165         struct entropy_cpu_lock lock;
 1166         struct entropy_cpu *ec;
 1167         uint8_t buf[ENTPOOL_CAPACITY];
 1168         uint32_t extra[7];
 1169         unsigned i = 0;
 1170 
 1171         /* Grab CPU number and cycle counter to mix extra into the pool.  */
 1172         extra[i++] = cpu_number();
 1173         extra[i++] = entropy_timer();
 1174 
 1175         /*
 1176          * With the per-CPU state locked, extract from the per-CPU pool
 1177          * and count it as no longer pending.
 1178          */
 1179         ec = entropy_cpu_get(&lock);
 1180         extra[i++] = entropy_timer();
 1181         entpool_extract(ec->ec_pool, buf, sizeof buf);
 1182         atomic_store_relaxed(&ec->ec_pending, 0);
 1183         extra[i++] = entropy_timer();
 1184         entropy_cpu_put(&lock, ec);
 1185         extra[i++] = entropy_timer();
 1186 
 1187         /*
 1188          * Copy over statistics, and enter the per-CPU extract and the
 1189          * extra timing into the temporary pool, under the global lock.
 1190          */
 1191         mutex_enter(&E->lock);
 1192         extra[i++] = entropy_timer();
 1193         entpool_enter(pool, buf, sizeof buf);
 1194         explicit_memset(buf, 0, sizeof buf);
 1195         extra[i++] = entropy_timer();
 1196         KASSERT(i == __arraycount(extra));
 1197         entpool_enter(pool, extra, sizeof extra);
 1198         explicit_memset(extra, 0, sizeof extra);
 1199         mutex_exit(&E->lock);
 1200 }
 1201 
 1202 /*
 1203  * entropy_notify()
 1204  *
 1205  *      Caller just contributed entropy to the global pool.  Advance
 1206  *      the entropy epoch and notify waiters.
 1207  *
 1208  *      Caller must hold the global entropy lock.  Except for the
 1209  *      `sysctl -w kern.entropy.consolidate=1` trigger, the caller must
  1210  *      have just transitioned from partial entropy to full
 1211  *      entropy -- E->needed should be zero now.
 1212  */
 1213 static void
 1214 entropy_notify(void)
 1215 {
 1216         static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
 1217         static struct timeval lasttime; /* serialized by E->lock */
 1218         unsigned epoch;
 1219 
 1220         KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
 1221 
 1222         /*
 1223          * If this is the first time, print a message to the console
 1224          * that we're ready so operators can compare it to the timing
 1225          * of other events.
 1226          */
 1227         if (__predict_false(E->epoch == (unsigned)-1) && E->needed == 0)
 1228                 printf("entropy: ready\n");
 1229 
  1230         /* Set the epoch; roll over from UINT_MAX-1 to 1.  */
 1231         if (__predict_true(!atomic_load_relaxed(&entropy_depletion)) ||
 1232             ratecheck(&lasttime, &interval)) {
 1233                 epoch = E->epoch + 1;
 1234                 if (epoch == 0 || epoch == (unsigned)-1)
 1235                         epoch = 1;
 1236                 atomic_store_relaxed(&E->epoch, epoch);
 1237         }
 1238         KASSERT(E->epoch != (unsigned)-1);
 1239 
 1240         /* Notify waiters.  */
 1241         if (E->stage >= ENTROPY_WARM) {
 1242                 cv_broadcast(&E->cv);
 1243                 selnotify(&E->selq, POLLIN|POLLRDNORM, NOTE_SUBMIT);
 1244         }
 1245 
 1246         /* Count another notification.  */
 1247         entropy_notify_evcnt.ev_count++;
 1248 }
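/*
 * Rollover, worked out: from the initial epoch of (unsigned)-1, the
 * first notification computes (unsigned)-1 + 1 == 0, which the check
 * above replaces with 1; thereafter the epoch counts 1, 2, 3, ...,
 * and if it ever wraps it skips both 0 and (unsigned)-1, so the two
 * sentinel values stay unambiguous.
 */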
 1249 
 1250 /*
 1251  * entropy_consolidate()
 1252  *
 1253  *      Trigger entropy consolidation and wait for it to complete.
 1254  *
 1255  *      This should be used sparingly, not periodically -- requiring
 1256  *      conscious intervention by the operator or a clear policy
 1257  *      decision.  Otherwise, the kernel will automatically consolidate
 1258  *      when enough entropy has been gathered into per-CPU pools to
 1259  *      transition to full entropy.
 1260  */
 1261 void
 1262 entropy_consolidate(void)
 1263 {
 1264         uint64_t ticket;
 1265         int error;
 1266 
 1267         KASSERT(E->stage == ENTROPY_HOT);
 1268 
 1269         mutex_enter(&E->lock);
 1270         ticket = entropy_consolidate_evcnt.ev_count;
 1271         E->consolidate = true;
 1272         cv_broadcast(&E->cv);
 1273         while (ticket == entropy_consolidate_evcnt.ev_count) {
 1274                 error = cv_wait_sig(&E->cv, &E->lock);
 1275                 if (error)
 1276                         break;
 1277         }
 1278         mutex_exit(&E->lock);
 1279 }
 1280 
 1281 /*
 1282  * sysctl -w kern.entropy.consolidate=1
 1283  *
 1284  *      Trigger entropy consolidation and wait for it to complete.
 1285  *      Writable only by superuser.  This, writing to /dev/random, and
 1286  *      ioctl(RNDADDDATA) are the only ways for the system to
 1287  *      consolidate entropy if the operator knows something the kernel
 1288  *      doesn't about how unpredictable the pending entropy pools are.
 1289  */
 1290 static int
 1291 sysctl_entropy_consolidate(SYSCTLFN_ARGS)
 1292 {
 1293         struct sysctlnode node = *rnode;
 1294         int arg = 0;
 1295         int error;
 1296 
 1297         KASSERT(E->stage == ENTROPY_HOT);
 1298 
 1299         node.sysctl_data = &arg;
 1300         error = sysctl_lookup(SYSCTLFN_CALL(&node));
 1301         if (error || newp == NULL)
 1302                 return error;
 1303         if (arg)
 1304                 entropy_consolidate();
 1305 
 1306         return error;
 1307 }
 1308 
 1309 /*
 1310  * sysctl -w kern.entropy.gather=1
 1311  *
 1312  *      Trigger gathering entropy from all on-demand sources, and wait
 1313  *      for synchronous sources (but not asynchronous sources) to
 1314  *      complete.  Writable only by superuser.
 1315  */
 1316 static int
 1317 sysctl_entropy_gather(SYSCTLFN_ARGS)
 1318 {
 1319         struct sysctlnode node = *rnode;
 1320         int arg = 0;
 1321         int error;
 1322 
 1323         KASSERT(E->stage == ENTROPY_HOT);
 1324 
 1325         node.sysctl_data = &arg;
 1326         error = sysctl_lookup(SYSCTLFN_CALL(&node));
 1327         if (error || newp == NULL)
 1328                 return error;
 1329         if (arg) {
 1330                 mutex_enter(&E->lock);
 1331                 error = entropy_request(ENTROPY_CAPACITY,
 1332                     ENTROPY_WAIT|ENTROPY_SIG);
 1333                 mutex_exit(&E->lock);
 1334         }
 1335 
 1336         return 0;
 1337 }
 1338 
 1339 /*
 1340  * entropy_extract(buf, len, flags)
 1341  *
 1342  *      Extract len bytes from the global entropy pool into buf.
 1343  *
 1344  *      Caller MUST NOT expose these bytes directly -- must use them
 1345  *      ONLY to seed a cryptographic pseudorandom number generator
 1346  *      (`CPRNG'), a.k.a. deterministic random bit generator (`DRBG'),
 1347  *      and then erase them.  entropy_extract does not, on its own,
 1348  *      provide backtracking resistance -- it must be combined with a
 1349  *      PRNG/DRBG that does.
 1350  *
 1351  *      You generally shouldn't use this directly -- use cprng(9)
 1352  *      instead.
 1353  *
 1354  *      Flags may have:
 1355  *
 1356  *              ENTROPY_WAIT    Wait for entropy if not available yet.
 1357  *              ENTROPY_SIG     Allow interruption by a signal during wait.
 1358  *              ENTROPY_HARDFAIL Either fill the buffer with full entropy,
 1359  *                              or fail without filling it at all.
 1360  *
 1361  *      Return zero on success, or error on failure:
 1362  *
 1363  *              EWOULDBLOCK     No entropy and ENTROPY_WAIT not set.
 1364  *              EINTR/ERESTART  No entropy, ENTROPY_SIG set, and interrupted.
 1365  *
 1366  *      If ENTROPY_WAIT is set, allowed only in thread context.  If
 1367  *      ENTROPY_WAIT is not set, allowed also in softint context.
 1368  *      Forbidden in hard interrupt context.
 1369  */
 1370 int
 1371 entropy_extract(void *buf, size_t len, int flags)
 1372 {
 1373         static const struct timeval interval = {.tv_sec = 60, .tv_usec = 0};
 1374         static struct timeval lasttime; /* serialized by E->lock */
 1375         int error;
 1376 
 1377         if (ISSET(flags, ENTROPY_WAIT)) {
 1378                 ASSERT_SLEEPABLE();
 1379                 KASSERTMSG(E->stage >= ENTROPY_WARM,
 1380                     "can't wait for entropy until warm");
 1381         }
 1382 
 1383         /* Refuse to operate in interrupt context.  */
 1384         KASSERT(!cpu_intr_p());
 1385 
 1386         /* Acquire the global lock to get at the global pool.  */
 1387         if (E->stage >= ENTROPY_WARM)
 1388                 mutex_enter(&E->lock);
 1389 
 1390         /* Wait until there is enough entropy in the system.  */
 1391         error = 0;
 1392         while (E->needed) {
 1393                 /* Ask for more, synchronously if possible.  */
 1394                 error = entropy_request(len, flags);
 1395                 if (error)
 1396                         break;
 1397 
 1398                 /* If we got enough, we're done.  */
 1399                 if (E->needed == 0) {
 1400                         KASSERT(error == 0);
 1401                         break;
 1402                 }
 1403 
 1404                 /* If not waiting, stop here.  */
 1405                 if (!ISSET(flags, ENTROPY_WAIT)) {
 1406                         error = EWOULDBLOCK;
 1407                         break;
 1408                 }
 1409 
 1410                 /* Wait for some entropy to come in and try again.  */
 1411                 KASSERT(E->stage >= ENTROPY_WARM);
 1412                 printf("entropy: pid %d (%s) blocking due to lack of entropy\n",
 1413                        curproc->p_pid, curproc->p_comm);
 1414 
 1415                 if (ISSET(flags, ENTROPY_SIG)) {
 1416                         error = cv_wait_sig(&E->cv, &E->lock);
 1417                         if (error)
 1418                                 break;
 1419                 } else {
 1420                         cv_wait(&E->cv, &E->lock);
 1421                 }
 1422         }
 1423 
 1424         /*
 1425          * Count failure -- but fill the buffer nevertheless, unless
 1426          * the caller specified ENTROPY_HARDFAIL.
 1427          */
 1428         if (error) {
 1429                 if (ISSET(flags, ENTROPY_HARDFAIL))
 1430                         goto out;
 1431                 entropy_extract_fail_evcnt.ev_count++;
 1432         }
 1433 
 1434         /*
 1435          * Report a warning if we have never yet reached full entropy.
 1436          * This is the only case where we consider entropy to be
 1437          * `depleted' without kern.entropy.depletion enabled -- when we
 1438          * only have partial entropy, an adversary may be able to
 1439          * narrow the state of the pool down to a small number of
 1440          * possibilities; the output then enables them to confirm a
 1441          * guess, reducing its entropy from the adversary's perspective
 1442          * to zero.
 1443          */
 1444         if (__predict_false(E->epoch == (unsigned)-1)) {
 1445                 if (ratecheck(&lasttime, &interval))
 1446                         printf("WARNING:"
 1447                             " system needs entropy for security;"
 1448                             " see entropy(7)\n");
 1449                 atomic_store_relaxed(&E->needed, ENTROPY_CAPACITY*NBBY);
 1450         }
 1451 
 1452         /* Extract data from the pool, and `deplete' if we're doing that.  */
 1453         entpool_extract(&E->pool, buf, len);
 1454         if (__predict_false(atomic_load_relaxed(&entropy_depletion)) &&
 1455             error == 0) {
 1456                 unsigned cost = MIN(len, ENTROPY_CAPACITY)*NBBY;
 1457 
 1458                 atomic_store_relaxed(&E->needed,
 1459                     E->needed + MIN(ENTROPY_CAPACITY*NBBY - E->needed, cost));
 1460                 entropy_deplete_evcnt.ev_count++;
 1461         }
 1462 
 1463 out:    /* Release the global lock and return the error.  */
 1464         if (E->stage >= ENTROPY_WARM)
 1465                 mutex_exit(&E->lock);
 1466         return error;
 1467 }
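
/*
 * Example (hedged): minimal sketch of the intended usage pattern --
 * extract output only to seed a DRBG, then erase it.  The DRBG and
 * its seeding routine (example_drbg, example_drbg_seed) are
 * hypothetical; real callers should use cprng(9) rather than calling
 * entropy_extract directly.
 */
#if 0
static int
example_seed_drbg(struct example_drbg *drbg)
{
        uint8_t seed[ENTROPY_CAPACITY];
        int error;

        /* Wait for full entropy; allow signals to interrupt the wait.  */
        error = entropy_extract(seed, sizeof seed, ENTROPY_WAIT|ENTROPY_SIG);
        if (error)
                return error;

        /* Seed the DRBG, then erase the seed from the stack.  */
        example_drbg_seed(drbg, seed, sizeof seed);     /* hypothetical */
        explicit_memset(seed, 0, sizeof seed);
        return 0;
}
#endif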
 1468 
 1469 /*
 1470  * entropy_poll(events)
 1471  *
 1472  *      Return the subset of events ready, and if it is not all of
 1473  *      events, record curlwp as waiting for entropy.
 1474  */
 1475 int
 1476 entropy_poll(int events)
 1477 {
 1478         int revents = 0;
 1479 
 1480         KASSERT(E->stage >= ENTROPY_WARM);
 1481 
 1482         /* Always ready for writing.  */
 1483         revents |= events & (POLLOUT|POLLWRNORM);
 1484 
 1485         /* Narrow it down to reads.  */
 1486         events &= POLLIN|POLLRDNORM;
 1487         if (events == 0)
 1488                 return revents;
 1489 
 1490         /*
 1491          * If we have reached full entropy and we're not depleting
 1492          * entropy, we are forever ready.
 1493          */
 1494         if (__predict_true(atomic_load_relaxed(&E->needed) == 0) &&
 1495             __predict_true(!atomic_load_relaxed(&entropy_depletion)))
 1496                 return revents | events;
 1497 
 1498         /*
 1499          * Otherwise, check whether we need entropy under the lock.  If
 1500          * we don't, we're ready; if we do, add ourselves to the queue.
 1501          */
 1502         mutex_enter(&E->lock);
 1503         if (E->needed == 0)
 1504                 revents |= events;
 1505         else
 1506                 selrecord(curlwp, &E->selq);
 1507         mutex_exit(&E->lock);
 1508 
 1509         return revents;
 1510 }
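
/*
 * Example (hedged): userland sketch of waiting for full entropy by
 * polling /dev/random for readability; fd is assumed to be an open
 * descriptor for /dev/random.
 */
#if 0
#include <poll.h>
#include <err.h>

static void
example_wait_full_entropy(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN };

        /* Block indefinitely until the system reaches full entropy.  */
        if (poll(&pfd, 1, INFTIM) == -1)
                err(1, "poll");
}
#endif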
 1511 
 1512 /*
 1513  * filt_entropy_read_detach(kn)
 1514  *
 1515  *      struct filterops::f_detach callback for entropy read events:
 1516  *      remove kn from the list of waiters.
 1517  */
 1518 static void
 1519 filt_entropy_read_detach(struct knote *kn)
 1520 {
 1521 
 1522         KASSERT(E->stage >= ENTROPY_WARM);
 1523 
 1524         mutex_enter(&E->lock);
 1525         selremove_knote(&E->selq, kn);
 1526         mutex_exit(&E->lock);
 1527 }
 1528 
 1529 /*
 1530  * filt_entropy_read_event(kn, hint)
 1531  *
 1532  *      struct filterops::f_event callback for entropy read events:
 1533  *      poll for entropy.  Caller must hold the global entropy lock if
 1534  *      hint is NOTE_SUBMIT, and must not if hint is not NOTE_SUBMIT.
 1535  */
 1536 static int
 1537 filt_entropy_read_event(struct knote *kn, long hint)
 1538 {
 1539         int ret;
 1540 
 1541         KASSERT(E->stage >= ENTROPY_WARM);
 1542 
 1543         /* Acquire the lock, if caller is outside entropy subsystem.  */
 1544         if (hint == NOTE_SUBMIT)
 1545                 KASSERT(mutex_owned(&E->lock));
 1546         else
 1547                 mutex_enter(&E->lock);
 1548 
 1549         /*
 1550          * If we still need entropy, can't read anything; if not, can
 1551          * read arbitrarily much.
 1552          */
 1553         if (E->needed != 0) {
 1554                 ret = 0;
 1555         } else {
 1556                 if (atomic_load_relaxed(&entropy_depletion))
 1557                         kn->kn_data = ENTROPY_CAPACITY*NBBY;
 1558                 else
 1559                         kn->kn_data = MIN(INT64_MAX, SSIZE_MAX);
 1560                 ret = 1;
 1561         }
 1562 
 1563         /* Release the lock, if caller is outside entropy subsystem.  */
 1564         if (hint == NOTE_SUBMIT)
 1565                 KASSERT(mutex_owned(&E->lock));
 1566         else
 1567                 mutex_exit(&E->lock);
 1568 
 1569         return ret;
 1570 }
 1571 
 1572 /* XXX Makes sense only for /dev/u?random.  */
 1573 static const struct filterops entropy_read_filtops = {
 1574         .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
 1575         .f_attach = NULL,
 1576         .f_detach = filt_entropy_read_detach,
 1577         .f_event = filt_entropy_read_event,
 1578 };
 1579 
 1580 /*
 1581  * entropy_kqfilter(kn)
 1582  *
 1583  *      Register kn to receive entropy event notifications.  May be
 1584  *      EVFILT_READ or EVFILT_WRITE; anything else yields EINVAL.
 1585  */
 1586 int
 1587 entropy_kqfilter(struct knote *kn)
 1588 {
 1589 
 1590         KASSERT(E->stage >= ENTROPY_WARM);
 1591 
 1592         switch (kn->kn_filter) {
 1593         case EVFILT_READ:
 1594                 /* Enter into the global select queue.  */
 1595                 mutex_enter(&E->lock);
 1596                 kn->kn_fop = &entropy_read_filtops;
 1597                 selrecord_knote(&E->selq, kn);
 1598                 mutex_exit(&E->lock);
 1599                 return 0;
 1600         case EVFILT_WRITE:
 1601                 /* Can always dump entropy into the system.  */
 1602                 kn->kn_fop = &seltrue_filtops;
 1603                 return 0;
 1604         default:
 1605                 return EINVAL;
 1606         }
 1607 }
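
/*
 * Example (hedged): the equivalent wait via kqueue(2); fd is assumed
 * to be an open descriptor for /dev/random.
 */
#if 0
#include <sys/event.h>
#include <err.h>

static void
example_kevent_full_entropy(int fd)
{
        struct kevent kev;
        int kq;

        if ((kq = kqueue()) == -1)
                err(1, "kqueue");
        /* Register for read events and wait for one to fire.  */
        EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
        if (kevent(kq, &kev, 1, &kev, 1, NULL) == -1)
                err(1, "kevent");
}
#endif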
 1608 
 1609 /*
 1610  * rndsource_setcb(rs, get, getarg)
 1611  *
 1612  *      Set the request callback for the entropy source rs, if it can
 1613  *      provide entropy on demand.  Must precede rnd_attach_source.
 1614  */
 1615 void
 1616 rndsource_setcb(struct krndsource *rs, void (*get)(size_t, void *),
 1617     void *getarg)
 1618 {
 1619 
 1620         rs->get = get;
 1621         rs->getarg = getarg;
 1622 }
 1623 
 1624 /*
 1625  * rnd_attach_source(rs, name, type, flags)
 1626  *
 1627  *      Attach the entropy source rs.  Must be done after
 1628  *      rndsource_setcb, if any, and before any calls to rnd_add_data.
 1629  */
 1630 void
 1631 rnd_attach_source(struct krndsource *rs, const char *name, uint32_t type,
 1632     uint32_t flags)
 1633 {
 1634         uint32_t extra[4];
 1635         unsigned i = 0;
 1636 
 1637         /* Grab cycle counter to mix extra into the pool.  */
 1638         extra[i++] = entropy_timer();
 1639 
 1640         /*
 1641          * Apply some standard flags:
 1642          *
 1643          * - We do not bother with network devices by default, for
 1644          *   hysterical raisins (perhaps: because it is often the case
 1645          *   that an adversary can influence network packet timings).
 1646          */
 1647         switch (type) {
 1648         case RND_TYPE_NET:
 1649                 flags |= RND_FLAG_NO_COLLECT;
 1650                 break;
 1651         }
 1652 
 1653         /* Sanity-check the callback if RND_FLAG_HASCB is set.  */
 1654         KASSERT(!ISSET(flags, RND_FLAG_HASCB) || rs->get != NULL);
 1655 
 1656         /* Initialize the random source.  */
 1657         memset(rs->name, 0, sizeof(rs->name)); /* paranoia */
 1658         strlcpy(rs->name, name, sizeof(rs->name));
 1659         memset(&rs->time_delta, 0, sizeof(rs->time_delta));
 1660         memset(&rs->value_delta, 0, sizeof(rs->value_delta));
 1661         rs->total = 0;
 1662         rs->type = type;
 1663         rs->flags = flags;
 1664         if (E->stage >= ENTROPY_WARM)
 1665                 rs->state = percpu_alloc(sizeof(struct rndsource_cpu));
 1666         extra[i++] = entropy_timer();
 1667 
 1668         /* Wire it into the global list of random sources.  */
 1669         if (E->stage >= ENTROPY_WARM)
 1670                 mutex_enter(&E->lock);
 1671         LIST_INSERT_HEAD(&E->sources, rs, list);
 1672         if (E->stage >= ENTROPY_WARM)
 1673                 mutex_exit(&E->lock);
 1674         extra[i++] = entropy_timer();
 1675 
 1676         /* Request that it provide entropy ASAP, if we can.  */
 1677         if (ISSET(flags, RND_FLAG_HASCB))
 1678                 (*rs->get)(ENTROPY_CAPACITY, rs->getarg);
 1679         extra[i++] = entropy_timer();
 1680 
 1681         /* Mix the extra into the pool.  */
 1682         KASSERT(i == __arraycount(extra));
 1683         entropy_enter(extra, sizeof extra, 0);
 1684         explicit_memset(extra, 0, sizeof extra);
 1685 }
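
/*
 * Example (hedged): sketch of how a driver might attach an on-demand
 * entropy source.  The softc, example_read_noise, and the entropy
 * claim are hypothetical; see rnd(9) for the real driver interface.
 */
#if 0
struct example_softc {
        struct krndsource       sc_rndsource;
        /* ... device state ... */
};

static void
example_get(size_t nbytes, void *arg)
{
        struct example_softc *sc = arg;
        uint32_t sample;

        while (nbytes) {
                sample = example_read_noise(sc);        /* hypothetical */
                /* Claim full entropy only if the hardware justifies it.  */
                rnd_add_data(&sc->sc_rndsource, &sample, sizeof sample,
                    NBBY*sizeof sample);
                nbytes -= MIN(nbytes, sizeof sample);
        }
}

static void
example_attach(struct example_softc *sc)
{
        rndsource_setcb(&sc->sc_rndsource, example_get, sc);
        rnd_attach_source(&sc->sc_rndsource, "example", RND_TYPE_RNG,
            RND_FLAG_DEFAULT|RND_FLAG_HASCB);
}
#endif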
 1686 
 1687 /*
 1688  * rnd_detach_source(rs)
 1689  *
 1690  *      Detach the entropy source rs.  May sleep waiting for users to
 1691  *      drain.  Further use is not allowed.
 1692  */
 1693 void
 1694 rnd_detach_source(struct krndsource *rs)
 1695 {
 1696 
 1697         /*
 1698          * If we're cold (shouldn't happen, but hey), just remove it
 1699          * from the list -- there's nothing allocated.
 1700          */
 1701         if (E->stage == ENTROPY_COLD) {
 1702                 LIST_REMOVE(rs, list);
 1703                 return;
 1704         }
 1705 
 1706         /* We may have to wait for entropy_request.  */
 1707         ASSERT_SLEEPABLE();
 1708 
 1709         /* Wait until the source list is not in use, and remove it.  */
 1710         mutex_enter(&E->lock);
 1711         while (E->sourcelock)
 1712                 cv_wait(&E->sourcelock_cv, &E->lock);
 1713         LIST_REMOVE(rs, list);
 1714         mutex_exit(&E->lock);
 1715 
 1716         /* Free the per-CPU data.  */
 1717         percpu_free(rs->state, sizeof(struct rndsource_cpu));
 1718 }
 1719 
 1720 /*
 1721  * rnd_lock_sources(flags)
 1722  *
 1723  *      Lock the list of entropy sources.  Caller must hold the global
 1724  *      entropy lock.  If successful, no rndsource will go away until
 1725  *      rnd_unlock_sources even while the caller releases the global
 1726  *      entropy lock.
 1727  *
 1728  *      If flags & ENTROPY_WAIT, wait for concurrent access to finish.
 1729  *      If flags & ENTROPY_SIG, allow interruption by signal.
 1730  */
 1731 static int __attribute__((warn_unused_result))
 1732 rnd_lock_sources(int flags)
 1733 {
 1734         int error;
 1735 
 1736         KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
 1737 
 1738         while (E->sourcelock) {
 1739                 KASSERT(E->stage >= ENTROPY_WARM);
 1740                 if (!ISSET(flags, ENTROPY_WAIT))
 1741                         return EWOULDBLOCK;
 1742                 if (ISSET(flags, ENTROPY_SIG)) {
 1743                         error = cv_wait_sig(&E->sourcelock_cv, &E->lock);
 1744                         if (error)
 1745                                 return error;
 1746                 } else {
 1747                         cv_wait(&E->sourcelock_cv, &E->lock);
 1748                 }
 1749         }
 1750 
 1751         E->sourcelock = curlwp;
 1752         return 0;
 1753 }
 1754 
 1755 /*
 1756  * rnd_unlock_sources()
 1757  *
 1758  *      Unlock the list of sources after rnd_lock_sources.  Caller must
 1759  *      hold the global entropy lock.
 1760  */
 1761 static void
 1762 rnd_unlock_sources(void)
 1763 {
 1764 
 1765         KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
 1766 
 1767         KASSERTMSG(E->sourcelock == curlwp, "lwp %p releasing lock held by %p",
 1768             curlwp, E->sourcelock);
 1769         E->sourcelock = NULL;
 1770         if (E->stage >= ENTROPY_WARM)
 1771                 cv_signal(&E->sourcelock_cv);
 1772 }
 1773 
 1774 /*
 1775  * rnd_sources_locked()
 1776  *
 1777  *      True if we hold the list of rndsources locked, for diagnostic
 1778  *      assertions.
 1779  */
 1780 static bool __diagused
 1781 rnd_sources_locked(void)
 1782 {
 1783 
 1784         return E->sourcelock == curlwp;
 1785 }
 1786 
 1787 /*
 1788  * entropy_request(nbytes, flags)
 1789  *
 1790  *      Request nbytes bytes of entropy from all sources in the system.
 1791  *      OK if we overdo it.  Caller must hold the global entropy lock;
 1792  *      will release and re-acquire it.
 1793  *
 1794  *      If flags & ENTROPY_WAIT, wait for concurrent access to finish.
 1795  *      If flags & ENTROPY_SIG, allow interruption by signal.
 1796  */
 1797 static int
 1798 entropy_request(size_t nbytes, int flags)
 1799 {
 1800         struct krndsource *rs;
 1801         int error;
 1802 
 1803         KASSERT(E->stage == ENTROPY_COLD || mutex_owned(&E->lock));
 1804         if (flags & ENTROPY_WAIT)
 1805                 ASSERT_SLEEPABLE();
 1806 
 1807         /*
 1808          * Lock the list of entropy sources to block rnd_detach_source
 1809          * until we're done, and to serialize calls to the entropy
 1810          * callbacks as guaranteed to drivers.
 1811          */
 1812         error = rnd_lock_sources(flags);
 1813         if (error)
 1814                 return error;
 1815         entropy_request_evcnt.ev_count++;
 1816 
 1817         /* Clamp to the maximum reasonable request.  */
 1818         nbytes = MIN(nbytes, ENTROPY_CAPACITY);
 1819 
 1820         /* Walk the list of sources.  */
 1821         LIST_FOREACH(rs, &E->sources, list) {
 1822                 /* Skip sources without callbacks.  */
 1823                 if (!ISSET(rs->flags, RND_FLAG_HASCB))
 1824                         continue;
 1825 
 1826                 /*
 1827                  * Skip sources that are disabled altogether -- we
 1828                  * would just ignore their samples anyway.
 1829                  */
 1830                 if (ISSET(rs->flags, RND_FLAG_NO_COLLECT))
 1831                         continue;
 1832 
 1833                 /* Drop the lock while we call the callback.  */
 1834                 if (E->stage >= ENTROPY_WARM)
 1835                         mutex_exit(&E->lock);
 1836                 (*rs->get)(nbytes, rs->getarg);
 1837                 if (E->stage >= ENTROPY_WARM)
 1838                         mutex_enter(&E->lock);
 1839         }
 1840 
 1841         /* Request done; unlock the list of entropy sources.  */
 1842         rnd_unlock_sources();
 1843         return 0;
 1844 }
 1845 
 1846 /*
 1847  * rnd_add_uint32(rs, value)
 1848  *
 1849  *      Enter 32 bits of data from an entropy source into the pool.
 1850  *
 1851  *      If rs is NULL, may not be called from interrupt context.
 1852  *
 1853  *      If rs is non-NULL, may be called from any context.  May drop
 1854  *      data if called from interrupt context.
 1855  */
 1856 void
 1857 rnd_add_uint32(struct krndsource *rs, uint32_t value)
 1858 {
 1859 
 1860         rnd_add_data(rs, &value, sizeof value, 0);
 1861 }
 1862 
 1863 void
 1864 _rnd_add_uint32(struct krndsource *rs, uint32_t value)
 1865 {
 1866 
 1867         rnd_add_data(rs, &value, sizeof value, 0);
 1868 }
 1869 
 1870 void
 1871 _rnd_add_uint64(struct krndsource *rs, uint64_t value)
 1872 {
 1873 
 1874         rnd_add_data(rs, &value, sizeof value, 0);
 1875 }
 1876 
 1877 /*
 1878  * rnd_add_data(rs, buf, len, entropybits)
 1879  *
 1880  *      Enter data from an entropy source into the pool, with a
 1881  *      driver's estimate of how much entropy the physical source of
 1882  *      the data has.  If RND_FLAG_NO_ESTIMATE, we ignore the driver's
 1883  *      estimate and treat it as zero.
 1884  *
 1885  *      If rs is NULL, may not be called from interrupt context.
 1886  *
 1887  *      If rs is non-NULL, may be called from any context.  May drop
 1888  *      data if called from interrupt context.
 1889  */
 1890 void
 1891 rnd_add_data(struct krndsource *rs, const void *buf, uint32_t len,
 1892     uint32_t entropybits)
 1893 {
 1894         uint32_t extra;
 1895         uint32_t flags;
 1896 
 1897         KASSERTMSG(howmany(entropybits, NBBY) <= len,
 1898             "%s: impossible entropy rate:"
 1899             " %"PRIu32" bits in %"PRIu32"-byte string",
 1900             rs ? rs->name : "(anonymous)", entropybits, len);
 1901 
 1902         /* If there's no rndsource, just enter the data and time now.  */
 1903         if (rs == NULL) {
 1904                 entropy_enter(buf, len, entropybits);
 1905                 extra = entropy_timer();
 1906                 entropy_enter(&extra, sizeof extra, 0);
 1907                 explicit_memset(&extra, 0, sizeof extra);
 1908                 return;
 1909         }
 1910 
 1911         /* Load a snapshot of the flags.  Ioctl may change them under us.  */
 1912         flags = atomic_load_relaxed(&rs->flags);
 1913 
 1914         /*
 1915          * Skip if:
 1916          * - we're not collecting entropy, or
 1917          * - the operator doesn't want to collect entropy from this, or
 1918          * - neither data nor timings are being collected from this.
 1919          */
 1920         if (!atomic_load_relaxed(&entropy_collection) ||
 1921             ISSET(flags, RND_FLAG_NO_COLLECT) ||
 1922             !ISSET(flags, RND_FLAG_COLLECT_VALUE|RND_FLAG_COLLECT_TIME))
 1923                 return;
 1924 
 1925         /* If asked, ignore the estimate.  */
 1926         if (ISSET(flags, RND_FLAG_NO_ESTIMATE))
 1927                 entropybits = 0;
 1928 
 1929         /* If we are collecting data, enter them.  */
 1930         if (ISSET(flags, RND_FLAG_COLLECT_VALUE))
 1931                 rnd_add_data_1(rs, buf, len, entropybits,
 1932                     RND_FLAG_COLLECT_VALUE);
 1933 
 1934         /* If we are collecting timings, enter one.  */
 1935         if (ISSET(flags, RND_FLAG_COLLECT_TIME)) {
 1936                 extra = entropy_timer();
 1937                 rnd_add_data_1(rs, &extra, sizeof extra, 0,
 1938                     RND_FLAG_COLLECT_TIME);
 1939         }
 1940 }
 1941 
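/*
 * add_sat(a, b)
 *
 *      Return a + b, saturating at UINT_MAX on overflow instead of
 *      wrapping around.
 */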
 1942 static unsigned
 1943 add_sat(unsigned a, unsigned b)
 1944 {
 1945         unsigned c = a + b;
 1946 
 1947         return (c < a ? UINT_MAX : c);
 1948 }
 1949 
 1950 /*
 1951  * rnd_add_data_1(rs, buf, len, entropybits, flag)
 1952  *
 1953  *      Internal subroutine to call either entropy_enter_intr, if we're
 1954  *      in interrupt context, or entropy_enter if not, and to count the
 1955  *      entropy in an rndsource.
 1956  */
 1957 static void
 1958 rnd_add_data_1(struct krndsource *rs, const void *buf, uint32_t len,
 1959     uint32_t entropybits, uint32_t flag)
 1960 {
 1961         bool fullyused;
 1962 
 1963         /*
 1964          * If we're in interrupt context, use entropy_enter_intr and
 1965          * take note of whether it consumed the full sample; if not,
 1966          * use entropy_enter, which always consumes the full sample.
 1967          */
 1968         if (curlwp && cpu_intr_p()) {
 1969                 fullyused = entropy_enter_intr(buf, len, entropybits);
 1970         } else {
 1971                 entropy_enter(buf, len, entropybits);
 1972                 fullyused = true;
 1973         }
 1974 
 1975         /*
 1976          * If we used the full sample, note how many bits were
 1977          * contributed from this source.
 1978          */
 1979         if (fullyused) {
 1980                 if (__predict_false(E->stage == ENTROPY_COLD)) {
 1981                         rs->total = add_sat(rs->total, entropybits);
 1982                         switch (flag) {
 1983                         case RND_FLAG_COLLECT_TIME:
 1984                                 rs->time_delta.insamples =
 1985                                     add_sat(rs->time_delta.insamples, 1);
 1986                                 break;
 1987                         case RND_FLAG_COLLECT_VALUE:
 1988                                 rs->value_delta.insamples =
 1989                                     add_sat(rs->value_delta.insamples, 1);
 1990                                 break;
 1991                         }
 1992                 } else {
 1993                         struct rndsource_cpu *rc = percpu_getref(rs->state);
 1994 
 1995                         atomic_store_relaxed(&rc->rc_entropybits,
 1996                             add_sat(rc->rc_entropybits, entropybits));
 1997                         switch (flag) {
 1998                         case RND_FLAG_COLLECT_TIME:
 1999                                 atomic_store_relaxed(&rc->rc_timesamples,
 2000                                     add_sat(rc->rc_timesamples, 1));
 2001                                 break;
 2002                         case RND_FLAG_COLLECT_VALUE:
 2003                                 atomic_store_relaxed(&rc->rc_datasamples,
 2004                                     add_sat(rc->rc_datasamples, 1));
 2005                                 break;
 2006                         }
 2007                         percpu_putref(rs->state);
 2008                 }
 2009         }
 2010 }
 2011 
 2012 /*
 2013  * rnd_add_data_sync(rs, buf, len, entropybits)
 2014  *
 2015  *      Same as rnd_add_data.  Originally used in rndsource callbacks,
 2016  *      to break an unnecessary cycle; no longer really needed.
 2017  */
 2018 void
 2019 rnd_add_data_sync(struct krndsource *rs, const void *buf, uint32_t len,
 2020     uint32_t entropybits)
 2021 {
 2022 
 2023         rnd_add_data(rs, buf, len, entropybits);
 2024 }
 2025 
 2026 /*
 2027  * rndsource_entropybits(rs)
 2028  *
 2029  *      Return approximately the number of bits of entropy that have
 2030  *      been contributed via rs so far.  Approximate if other CPUs may
 2031  *      be calling rnd_add_data concurrently.
 2032  */
 2033 static unsigned
 2034 rndsource_entropybits(struct krndsource *rs)
 2035 {
 2036         unsigned nbits = rs->total;
 2037 
 2038         KASSERT(E->stage >= ENTROPY_WARM);
 2039         KASSERT(rnd_sources_locked());
 2040         percpu_foreach(rs->state, rndsource_entropybits_cpu, &nbits);
 2041         return nbits;
 2042 }
 2043 
 2044 static void
 2045 rndsource_entropybits_cpu(void *ptr, void *cookie, struct cpu_info *ci)
 2046 {
 2047         struct rndsource_cpu *rc = ptr;
 2048         unsigned *nbitsp = cookie;
 2049         unsigned cpu_nbits;
 2050 
 2051         cpu_nbits = atomic_load_relaxed(&rc->rc_entropybits);
 2052         *nbitsp += MIN(UINT_MAX - *nbitsp, cpu_nbits);
 2053 }
 2054 
 2055 /*
 2056  * rndsource_to_user(rs, urs)
 2057  *
 2058  *      Copy a description of rs out to urs for userland.
 2059  */
 2060 static void
 2061 rndsource_to_user(struct krndsource *rs, rndsource_t *urs)
 2062 {
 2063 
 2064         KASSERT(E->stage >= ENTROPY_WARM);
 2065         KASSERT(rnd_sources_locked());
 2066 
 2067         /* Avoid kernel memory disclosure.  */
 2068         memset(urs, 0, sizeof(*urs));
 2069 
 2070         CTASSERT(sizeof(urs->name) == sizeof(rs->name));
 2071         strlcpy(urs->name, rs->name, sizeof(urs->name));
 2072         urs->total = rndsource_entropybits(rs);
 2073         urs->type = rs->type;
 2074         urs->flags = atomic_load_relaxed(&rs->flags);
 2075 }
 2076 
 2077 /*
 2078  * rndsource_to_user_est(rs, urse)
 2079  *
 2080  *      Copy a description of rs and estimation statistics out to urse
 2081  *      for userland.
 2082  */
 2083 static void
 2084 rndsource_to_user_est(struct krndsource *rs, rndsource_est_t *urse)
 2085 {
 2086 
 2087         KASSERT(E->stage >= ENTROPY_WARM);
 2088         KASSERT(rnd_sources_locked());
 2089 
 2090         /* Avoid kernel memory disclosure.  */
 2091         memset(urse, 0, sizeof(*urse));
 2092 
 2093         /* Copy out the rndsource description.  */
 2094         rndsource_to_user(rs, &urse->rt);
 2095 
 2096         /* Gather the statistics.  */
 2097         urse->dt_samples = rs->time_delta.insamples;
 2098         urse->dt_total = 0;
 2099         urse->dv_samples = rs->value_delta.insamples;
 2100         urse->dv_total = urse->rt.total;
 2101         percpu_foreach(rs->state, rndsource_to_user_est_cpu, urse);
 2102 }
 2103 
 2104 static void
 2105 rndsource_to_user_est_cpu(void *ptr, void *cookie, struct cpu_info *ci)
 2106 {
 2107         struct rndsource_cpu *rc = ptr;
 2108         rndsource_est_t *urse = cookie;
 2109 
 2110         urse->dt_samples = add_sat(urse->dt_samples,
 2111             atomic_load_relaxed(&rc->rc_timesamples));
 2112         urse->dv_samples = add_sat(urse->dv_samples,
 2113             atomic_load_relaxed(&rc->rc_datasamples));
 2114 }
 2115 
 2116 /*
 2117  * entropy_reset_xc(arg1, arg2)
 2118  *
 2119  *      Reset the current CPU's pending entropy to zero.
 2120  */
 2121 static void
 2122 entropy_reset_xc(void *arg1 __unused, void *arg2 __unused)
 2123 {
 2124         uint32_t extra = entropy_timer();
 2125         struct entropy_cpu_lock lock;
 2126         struct entropy_cpu *ec;
 2127 
 2128         /*
 2129          * With the per-CPU state locked, zero the pending count and
 2130          * enter a cycle count for fun.
 2131          */
 2132         ec = entropy_cpu_get(&lock);
 2133         ec->ec_pending = 0;
 2134         entpool_enter(ec->ec_pool, &extra, sizeof extra);
 2135         entropy_cpu_put(&lock, ec);
 2136 }
 2137 
 2138 /*
 2139  * entropy_ioctl(cmd, data)
 2140  *
 2141  *      Handle various /dev/random ioctl queries.
 2142  */
 2143 int
 2144 entropy_ioctl(unsigned long cmd, void *data)
 2145 {
 2146         struct krndsource *rs;
 2147         bool privileged = false;
 2148         int error;
 2149 
 2150         KASSERT(E->stage >= ENTROPY_WARM);
 2151 
 2152         /* Verify user's authorization to perform the ioctl.  */
 2153         switch (cmd) {
 2154         case RNDGETENTCNT:
 2155         case RNDGETPOOLSTAT:
 2156         case RNDGETSRCNUM:
 2157         case RNDGETSRCNAME:
 2158         case RNDGETESTNUM:
 2159         case RNDGETESTNAME:
 2160                 error = kauth_authorize_device(kauth_cred_get(),
 2161                     KAUTH_DEVICE_RND_GETPRIV, NULL, NULL, NULL, NULL);
 2162                 break;
 2163         case RNDCTL:
 2164                 error = kauth_authorize_device(kauth_cred_get(),
 2165                     KAUTH_DEVICE_RND_SETPRIV, NULL, NULL, NULL, NULL);
 2166                 break;
 2167         case RNDADDDATA:
 2168                 error = kauth_authorize_device(kauth_cred_get(),
 2169                     KAUTH_DEVICE_RND_ADDDATA, NULL, NULL, NULL, NULL);
 2170                 /* Ascertain whether the user's inputs should be counted.  */
 2171                 if (kauth_authorize_device(kauth_cred_get(),
 2172                         KAUTH_DEVICE_RND_ADDDATA_ESTIMATE,
 2173                         NULL, NULL, NULL, NULL) == 0)
 2174                         privileged = true;
 2175                 break;
 2176         default: {
 2177                 /*
 2178                  * XXX Hack to avoid changing module ABI so this can be
 2179                  * pulled up.  Later, we can just remove the argument.
 2180                  */
 2181                 static const struct fileops fops = {
 2182                         .fo_ioctl = rnd_system_ioctl,
 2183                 };
 2184                 struct file f = {
 2185                         .f_ops = &fops,
 2186                 };
 2187                 MODULE_HOOK_CALL(rnd_ioctl_50_hook, (&f, cmd, data),
 2188                     enosys(), error);
 2189 #if defined(_LP64)
 2190                 if (error == ENOSYS)
 2191                         MODULE_HOOK_CALL(rnd_ioctl32_50_hook, (&f, cmd, data),
 2192                             enosys(), error);
 2193 #endif
 2194                 if (error == ENOSYS)
 2195                         error = ENOTTY;
 2196                 break;
 2197         }
 2198         }
 2199 
 2200         /* If anything went wrong with authorization, stop here.  */
 2201         if (error)
 2202                 return error;
 2203 
 2204         /* Dispatch on the command.  */
 2205         switch (cmd) {
 2206         case RNDGETENTCNT: {    /* Get current entropy count in bits.  */
 2207                 uint32_t *countp = data;
 2208 
 2209                 mutex_enter(&E->lock);
 2210                 *countp = ENTROPY_CAPACITY*NBBY - E->needed;
 2211                 mutex_exit(&E->lock);
 2212 
 2213                 break;
 2214         }
 2215         case RNDGETPOOLSTAT: {  /* Get entropy pool statistics.  */
 2216                 rndpoolstat_t *pstat = data;
 2217 
 2218                 mutex_enter(&E->lock);
 2219 
 2220                 /* parameters */
 2221                 pstat->poolsize = ENTPOOL_SIZE/sizeof(uint32_t); /* words */
 2222                 pstat->threshold = ENTROPY_CAPACITY*1; /* bytes */
 2223                 pstat->maxentropy = ENTROPY_CAPACITY*NBBY; /* bits */
 2224 
 2225                 /* state */
 2226                 pstat->added = 0; /* XXX total entropy_enter count */
 2227                 pstat->curentropy = ENTROPY_CAPACITY*NBBY - E->needed;
 2228                 pstat->removed = 0; /* XXX total entropy_extract count */
 2229                 pstat->discarded = 0; /* XXX bits of entropy beyond capacity */
 2230                 pstat->generated = 0; /* XXX bits of data...fabricated? */
 2231 
 2232                 mutex_exit(&E->lock);
 2233                 break;
 2234         }
 2235         case RNDGETSRCNUM: {    /* Get entropy sources by number.  */
 2236                 rndstat_t *stat = data;
 2237                 uint32_t start = 0, i = 0;
 2238 
 2239                 /* Skip if none requested; fail if too many requested.  */
 2240                 if (stat->count == 0)
 2241                         break;
 2242                 if (stat->count > RND_MAXSTATCOUNT)
 2243                         return EINVAL;
 2244 
 2245                 /*
 2246                  * Under the lock, find the first one, copy out as many
 2247                  * as requested, and report how many we copied out.
 2248                  */
 2249                 mutex_enter(&E->lock);
 2250                 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
 2251                 if (error) {
 2252                         mutex_exit(&E->lock);
 2253                         return error;
 2254                 }
 2255                 LIST_FOREACH(rs, &E->sources, list) {
 2256                         if (start++ == stat->start)
 2257                                 break;
 2258                 }
 2259                 while (i < stat->count && rs != NULL) {
 2260                         mutex_exit(&E->lock);
 2261                         rndsource_to_user(rs, &stat->source[i++]);
 2262                         mutex_enter(&E->lock);
 2263                         rs = LIST_NEXT(rs, list);
 2264                 }
 2265                 KASSERT(i <= stat->count);
 2266                 stat->count = i;
 2267                 rnd_unlock_sources();
 2268                 mutex_exit(&E->lock);
 2269                 break;
 2270         }
 2271         case RNDGETESTNUM: {    /* Get sources and estimates by number.  */
 2272                 rndstat_est_t *estat = data;
 2273                 uint32_t start = 0, i = 0;
 2274 
 2275                 /* Skip if none requested; fail if too many requested.  */
 2276                 if (estat->count == 0)
 2277                         break;
 2278                 if (estat->count > RND_MAXSTATCOUNT)
 2279                         return EINVAL;
 2280 
 2281                 /*
 2282                  * Under the lock, find the first one, copy out as many
 2283                  * as requested, and report how many we copied out.
 2284                  */
 2285                 mutex_enter(&E->lock);
 2286                 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
 2287                 if (error) {
 2288                         mutex_exit(&E->lock);
 2289                         return error;
 2290                 }
 2291                 LIST_FOREACH(rs, &E->sources, list) {
 2292                         if (start++ == estat->start)
 2293                                 break;
 2294                 }
 2295                 while (i < estat->count && rs != NULL) {
 2296                         mutex_exit(&E->lock);
 2297                         rndsource_to_user_est(rs, &estat->source[i++]);
 2298                         mutex_enter(&E->lock);
 2299                         rs = LIST_NEXT(rs, list);
 2300                 }
 2301                 KASSERT(i <= estat->count);
 2302                 estat->count = i;
 2303                 rnd_unlock_sources();
 2304                 mutex_exit(&E->lock);
 2305                 break;
 2306         }
 2307         case RNDGETSRCNAME: {   /* Get entropy sources by name.  */
 2308                 rndstat_name_t *nstat = data;
 2309                 const size_t n = sizeof(rs->name);
 2310 
 2311                 CTASSERT(sizeof(rs->name) == sizeof(nstat->name));
 2312 
 2313                 /*
 2314                  * Under the lock, search by name.  If found, copy it
 2315                  * out; if not found, fail with ENOENT.
 2316                  */
 2317                 mutex_enter(&E->lock);
 2318                 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
 2319                 if (error) {
 2320                         mutex_exit(&E->lock);
 2321                         return error;
 2322                 }
 2323                 LIST_FOREACH(rs, &E->sources, list) {
 2324                         if (strncmp(rs->name, nstat->name, n) == 0)
 2325                                 break;
 2326                 }
 2327                 if (rs != NULL) {
 2328                         mutex_exit(&E->lock);
 2329                         rndsource_to_user(rs, &nstat->source);
 2330                         mutex_enter(&E->lock);
 2331                 } else {
 2332                         error = ENOENT;
 2333                 }
 2334                 rnd_unlock_sources();
 2335                 mutex_exit(&E->lock);
 2336                 break;
 2337         }
 2338         case RNDGETESTNAME: {   /* Get sources and estimates by name.  */
 2339                 rndstat_est_name_t *enstat = data;
 2340                 const size_t n = sizeof(rs->name);
 2341 
 2342                 CTASSERT(sizeof(rs->name) == sizeof(enstat->name));
 2343 
 2344                 /*
 2345                  * Under the lock, search by name.  If found, copy it
 2346                  * out; if not found, fail with ENOENT.
 2347                  */
 2348                 mutex_enter(&E->lock);
 2349                 error = rnd_lock_sources(ENTROPY_WAIT|ENTROPY_SIG);
 2350                 if (error) {
 2351                         mutex_exit(&E->lock);
 2352                         return error;
 2353                 }
 2354                 LIST_FOREACH(rs, &E->sources, list) {
 2355                         if (strncmp(rs->name, enstat->name, n) == 0)
 2356                                 break;
 2357                 }
 2358                 if (rs != NULL) {
 2359                         mutex_exit(&E->lock);
 2360                         rndsource_to_user_est(rs, &enstat->source);
 2361                         mutex_enter(&E->lock);
 2362                 } else {
 2363                         error = ENOENT;
 2364                 }
 2365                 rnd_unlock_sources();
 2366                 mutex_exit(&E->lock);
 2367                 break;
 2368         }
 2369         case RNDCTL: {          /* Modify entropy source flags.  */
 2370                 rndctl_t *rndctl = data;
 2371                 const size_t n = sizeof(rs->name);
 2372                 uint32_t resetflags = RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
 2373                 uint32_t flags;
 2374                 bool reset = false, request = false;
 2375 
 2376                 CTASSERT(sizeof(rs->name) == sizeof(rndctl->name));
 2377 
 2378                 /* Whitelist the flags that the user can change.  */
 2379                 rndctl->mask &= RND_FLAG_NO_ESTIMATE|RND_FLAG_NO_COLLECT;
 2380 
 2381                 /*
 2382                  * For each matching rndsource, either by type if
 2383                  * specified or by name if not, set the masked flags.
 2384                  */
 2385                 mutex_enter(&E->lock);
 2386                 LIST_FOREACH(rs, &E->sources, list) {
 2387                         if (rndctl->type != 0xff) {
 2388                                 if (rs->type != rndctl->type)
 2389                                         continue;
 2390                         } else {
 2391                                 if (strncmp(rs->name, rndctl->name, n) != 0)
 2392                                         continue;
 2393                         }
 2394                         flags = rs->flags & ~rndctl->mask;
 2395                         flags |= rndctl->flags & rndctl->mask;
 2396                         if ((rs->flags & resetflags) == 0 &&
 2397                             (flags & resetflags) != 0)
 2398                                 reset = true;
 2399                         if ((rs->flags ^ flags) & resetflags)
 2400                                 request = true;
 2401                         atomic_store_relaxed(&rs->flags, flags);
 2402                 }
 2403                 mutex_exit(&E->lock);
 2404 
 2405                 /*
 2406                  * If we disabled estimation or collection, nix all the
 2407                  * pending entropy and set needed to the maximum.
 2408                  */
 2409                 if (reset) {
 2410                         xc_broadcast(0, &entropy_reset_xc, NULL, NULL);
 2411                         mutex_enter(&E->lock);
 2412                         E->pending = 0;
 2413                         atomic_store_relaxed(&E->needed,
 2414                             ENTROPY_CAPACITY*NBBY);
 2415                         mutex_exit(&E->lock);
 2416                 }
 2417 
 2418                 /*
 2419                  * If we changed any of the estimation or collection
 2420                  * flags, request new samples from everyone -- either
 2421                  * to make up for what we just lost, or to get new
 2422                  * samples from what we just added.
 2423                  *
 2424                  * Failing on signal, while waiting for another process
 2425                  * to finish requesting entropy, is OK here even though
 2426                  * we have committed side effects, because this ioctl
 2427                  * command is idempotent, so repeating it is safe.
 2428                  */
 2429                 if (request) {
 2430                         mutex_enter(&E->lock);
 2431                         error = entropy_request(ENTROPY_CAPACITY,
 2432                             ENTROPY_WAIT|ENTROPY_SIG);
 2433                         mutex_exit(&E->lock);
 2434                 }
 2435                 break;
 2436         }
 2437         case RNDADDDATA: {      /* Enter seed into entropy pool.  */
 2438                 rnddata_t *rdata = data;
 2439                 unsigned entropybits = 0;
 2440 
 2441                 if (!atomic_load_relaxed(&entropy_collection))
 2442                         break;  /* thanks but no thanks */
 2443                 if (rdata->len > MIN(sizeof(rdata->data), UINT32_MAX/NBBY))
 2444                         return EINVAL;
 2445 
 2446                 /*
 2447                  * This ioctl serves as the userland alternative to a
 2448                  * bootloader-provided seed -- typically furnished by
 2449                  * /etc/rc.d/random_seed.  We accept the user's entropy
 2450                  * claim only if
 2451                  *
 2452                  * (a) the user is privileged, and
 2453                  * (b) we have not entered a bootloader seed.
 2454                  *
 2455                  * under the assumption that the user may use this to
 2456                  * load a seed from disk that we have already loaded
 2457                  * from the bootloader, so we don't double-count it.
 2458                  */
 2459                 if (privileged && rdata->entropy && rdata->len) {
 2460                         mutex_enter(&E->lock);
 2461                         if (!E->seeded) {
 2462                                 entropybits = MIN(rdata->entropy,
 2463                                     MIN(rdata->len, ENTROPY_CAPACITY)*NBBY);
 2464                                 E->seeded = true;
 2465                         }
 2466                         mutex_exit(&E->lock);
 2467                 }
 2468 
 2469                 /* Enter the data and consolidate entropy.  */
 2470                 rnd_add_data(&seed_rndsource, rdata->data, rdata->len,
 2471                     entropybits);
 2472                 entropy_consolidate();
 2473                 break;
 2474         }
 2475         default:
 2476                 error = ENOTTY;
 2477         }
 2478 
 2479         /* Return any error that may have come up.  */
 2480         return error;
 2481 }
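
/*
 * Example (hedged): userland sketch of loading a seed file through
 * ioctl(RNDADDDATA) on /dev/random, roughly what /etc/rc.d/random_seed
 * arranges; the descriptors and the entropy claim are illustrative.
 */
#if 0
#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/rnd.h>
#include <err.h>
#include <string.h>
#include <unistd.h>

static void
example_load_seed(int randomfd, int seedfd)
{
        rnddata_t rd;
        ssize_t n;

        memset(&rd, 0, sizeof rd);
        if ((n = read(seedfd, rd.data, sizeof rd.data)) == -1)
                err(1, "read");
        rd.len = n;
        rd.entropy = n*NBBY;    /* claim; counted only if privileged */
        if (ioctl(randomfd, RNDADDDATA, &rd) == -1)
                err(1, "ioctl(RNDADDDATA)");
}
#endif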
 2482 
 2483 /* Legacy entry points */
 2484 
 2485 void
 2486 rnd_seed(void *seed, size_t len)
 2487 {
 2488 
 2489         if (len != sizeof(rndsave_t)) {
 2490                 printf("entropy: invalid seed length: %zu,"
 2491                     " expected sizeof(rndsave_t) = %zu\n",
 2492                     len, sizeof(rndsave_t));
 2493                 return;
 2494         }
 2495         entropy_seed(seed);
 2496 }
 2497 
 2498 void
 2499 rnd_init(void)
 2500 {
 2501 
 2502         entropy_init();
 2503 }
 2504 
 2505 void
 2506 rnd_init_softint(void)
 2507 {
 2508 
 2509         entropy_init_late();
 2510         entropy_bootrequest();
 2511 }
 2512 
 2513 int
 2514 rnd_system_ioctl(struct file *fp, unsigned long cmd, void *data)
 2515 {
 2516 
 2517         return entropy_ioctl(cmd, data);
 2518 }
