The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/dev/rnd.c

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*      $NetBSD: rnd.c,v 1.71.4.3 2011/01/07 01:35:05 riz Exp $ */
    2 
    3 /*-
    4  * Copyright (c) 1997 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Michael Graff <explorer@flame.org>.  This code uses ideas and
    9  * algorithms from the Linux driver written by Ted Ts'o.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   30  * POSSIBILITY OF SUCH DAMAGE.
   31  */
   32 
   33 #include <sys/cdefs.h>
   34 __KERNEL_RCSID(0, "$NetBSD: rnd.c,v 1.71.4.3 2011/01/07 01:35:05 riz Exp $");
   35 
   36 #include <sys/param.h>
   37 #include <sys/ioctl.h>
   38 #include <sys/fcntl.h>
   39 #include <sys/select.h>
   40 #include <sys/poll.h>
   41 #include <sys/malloc.h>
   42 #include <sys/mutex.h>
   43 #include <sys/proc.h>
   44 #include <sys/kernel.h>
   45 #include <sys/conf.h>
   46 #include <sys/systm.h>
   47 #include <sys/callout.h>
   48 #include <sys/rnd.h>
   49 #include <sys/vnode.h>
   50 #include <sys/pool.h>
   51 #include <sys/kauth.h>
   52 
   53 #ifdef __HAVE_CPU_COUNTER
   54 #include <machine/cpu_counter.h>
   55 #endif
   56 
   57 #ifdef RND_DEBUG
   58 #define DPRINTF(l,x)      if (rnd_debug & (l)) printf x
   59 int     rnd_debug = 0;
   60 #else
   61 #define DPRINTF(l,x)
   62 #endif
   63 
   64 #define RND_DEBUG_WRITE         0x0001
   65 #define RND_DEBUG_READ          0x0002
   66 #define RND_DEBUG_IOCTL         0x0004
   67 #define RND_DEBUG_SNOOZE        0x0008
   68 
   69 /*
   70  * list devices attached
   71  */
   72 #if 0
   73 #define RND_VERBOSE
   74 #endif
   75 
   76 /*
   77  * The size of a temporary buffer, malloc()ed when needed, and used for
   78  * reading and writing data.
   79  */
   80 #define RND_TEMP_BUFFER_SIZE    128
   81 
   82 /*
   83  * This is a little bit of state information attached to each device that we
   84  * collect entropy from.  This is simply a collection buffer, and when it
   85  * is full it will be "detached" from the source and added to the entropy
   86  * pool after entropy is distilled as much as possible.
   87  */
   88 #define RND_SAMPLE_COUNT        64      /* collect N samples, then compress */
    89 typedef struct _rnd_sample_t {
    90         SIMPLEQ_ENTRY(_rnd_sample_t) next;      /* linkage on the rnd_samples processing queue */
    91         rndsource_t     *source;                /* source that produced these samples */
    92         int             cursor;                 /* next free slot in ts[]/values[] */
    93         int             entropy;                /* entropy bits estimated for this batch */
    94         u_int32_t       ts[RND_SAMPLE_COUNT];   /* timestamps from rnd_counter() */
    95         u_int32_t       values[RND_SAMPLE_COUNT];       /* the sampled event values */
    96 } rnd_sample_t;
   97 
   98 /*
   99  * The event queue.  Fields are altered at an interrupt level.
  100  * All accesses must be protected with the mutex.
  101  */
  102 volatile int                    rnd_timeout_pending;
  103 SIMPLEQ_HEAD(, _rnd_sample_t)   rnd_samples;
  104 kmutex_t                        rnd_mtx;
  105 
  106 /*
  107  * our select/poll queue
  108  */
  109 struct selinfo rnd_selq;
  110 
  111 /*
  112  * Set when there are readers blocking on data from us
  113  */
  114 #define RND_READWAITING 0x00000001
  115 volatile u_int32_t rnd_status;
  116 
  117 /*
  118  * Memory pool for sample buffers
  119  */
  120 POOL_INIT(rnd_mempool, sizeof(rnd_sample_t), 0, 0, 0, "rndsample", NULL,
  121     IPL_VM);
  122 
  123 /*
  124  * Our random pool.  This is defined here rather than using the general
  125  * purpose one defined in rndpool.c.
  126  *
  127  * Samples are collected and queued into a separate mutex-protected queue
  128  * (rnd_samples, see above), and processed in a timeout routine; therefore,
  129  * the mutex protecting the random pool is at IPL_SOFTCLOCK() as well.
  130  */
  131 rndpool_t rnd_pool;
  132 kmutex_t  rndpool_mtx;
  133 
  134 /*
  135  * This source is used to easily "remove" queue entries when the source
  136  * which actually generated the events is going away.
  137  */
  138 static rndsource_t rnd_source_no_collect = {
  139         { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 0, 0, 0, 0, 0, 0, 0 },
  140         0, 0, 0, 0,
  141         RND_TYPE_UNKNOWN,
  142         (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE | RND_TYPE_UNKNOWN),
  143         NULL
  144 };
  145 
  146 struct callout rnd_callout;
  147 
  148 void    rndattach(int);
  149 
  150 dev_type_open(rndopen);
  151 dev_type_read(rndread);
  152 dev_type_write(rndwrite);
  153 dev_type_ioctl(rndioctl);
  154 dev_type_poll(rndpoll);
  155 dev_type_kqfilter(rndkqfilter);
  156 
  157 const struct cdevsw rnd_cdevsw = {
  158         rndopen, nullclose, rndread, rndwrite, rndioctl,
  159         nostop, notty, rndpoll, nommap, rndkqfilter, D_OTHER,
  160 };
  161 
  162 static inline void      rnd_wakeup_readers(void);
  163 static inline u_int32_t rnd_estimate_entropy(rndsource_t *, u_int32_t);
  164 static inline u_int32_t rnd_counter(void);
  165 static        void      rnd_timeout(void *);
  166 
  167 static int              rnd_ready = 0;
  168 static int              rnd_have_entropy = 0;
  169 
  170 LIST_HEAD(, __rndsource_element)        rnd_sources;
  171 
   172 /*
   173  * Generate a 32-bit counter.  This should be more machine dependent,
   174  * using cycle counters and the like when possible.
   175  */
   176 static inline u_int32_t
   177 rnd_counter(void)
   178 {
   179         struct timeval tv;
   180 
   181 #ifdef __HAVE_CPU_COUNTER
   182         if (cpu_hascounter())
   183                 return (cpu_counter32());       /* best case: CPU cycle counter */
   184 #endif
   185         if (rnd_ready) {
   186                 microtime(&tv);
   187                 return (tv.tv_sec * 1000000 + tv.tv_usec);      /* microsecond clock, truncated to 32 bits */
   188         }
   189         /* when called from rnd_init, it's too early to call microtime safely */
   190         return (0);
   191 }
  192 
   193 /*
   194  * Check to see if there are readers waiting on us.  If so, kick them.
   195  */
   196 static inline void
   197 rnd_wakeup_readers(void)
   198 {
   199 
   200         /*
   201          * If we have added new bits, and now have enough to do something,
   202          * wake up sleeping readers.
   203          */
   204         mutex_enter(&rndpool_mtx);
   205         if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {         /* count is in bits, threshold in bytes */
   206                 if (rnd_status & RND_READWAITING) {
   207                         DPRINTF(RND_DEBUG_SNOOZE,
   208                             ("waking up pending readers.\n"));
   209                         rnd_status &= ~RND_READWAITING;
   210                         wakeup(&rnd_selq);      /* rnd_selq doubles as the tsleep wait channel (see rndread) */
   211                 }
   212                 selnotify(&rnd_selq, 0, 0);     /* also kick select/poll and kqueue waiters */
   213 
   214 #ifdef RND_VERBOSE
   215                 if (!rnd_have_entropy)
   216                         printf("rnd: have initial entropy (%u)\n",
   217                                rndpool_get_entropy_count(&rnd_pool));
   218 #endif
   219                 rnd_have_entropy = 1;
   220         }
   221         mutex_exit(&rndpool_mtx);
   222 }
  223 
   224 /*
   225  * Use the timing of the event to estimate the entropy gathered.
   226  * If all the differentials (first, second, and third) are non-zero, return
   227  * non-zero.  If any of these are zero, return zero.
   228  */
   229 static inline u_int32_t
   230 rnd_estimate_entropy(rndsource_t *rs, u_int32_t t)
   231 {
   232         int32_t delta, delta2, delta3;
   233 
   234         /*
   235          * If the time counter has overflowed, calculate the real difference.
   236          * If it has not, it is simpler.
   237          */
   238         if (t < rs->last_time)
   239                 delta = UINT_MAX - rs->last_time + t;
   240         else
   241                 delta = rs->last_time - t;      /* may be negative here; made positive just below */
   242 
   243         if (delta < 0)
   244                 delta = -delta;
   245 
   246         /*
   247          * Calculate the second and third order differentials
   248          */
   249         delta2 = rs->last_delta - delta;
   250         if (delta2 < 0)
   251                 delta2 = -delta2;
   252 
   253         delta3 = rs->last_delta2 - delta2;
   254         if (delta3 < 0)
   255                 delta3 = -delta3;
   256 
   257         rs->last_time = t;
   258         rs->last_delta = delta;
   259         rs->last_delta2 = delta2;
   260 
   261         /*
   262          * If any delta is 0, we got no entropy.  If all are non-zero, we
   263          * might have something.
   264          */
   265         if (delta == 0 || delta2 == 0 || delta3 == 0)
   266                 return (0);
   267 
   268         return (1);     /* credit at most one bit of entropy per event */
   269 }
  270 
   271 /*
   272  * "Attach" the random device. This is an (almost) empty stub, since
   273  * pseudo-devices don't get attached until after config, after the
   274  * entropy sources will attach. We just use the timing of this event
   275  * as another potential source of initial entropy.
   276  */
   277 void
   278 rndattach(int num)
   279 {
   280         u_int32_t c;
   281 
   282         /* Trap unwary players who don't call rnd_init() early */
   283         KASSERT(rnd_ready);
   284 
   285         /* mix in another counter */
   286         c = rnd_counter();
   287         rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);  /* credit a single entropy bit */
   288 }
  289 
   290 /*
   291  * initialize the global random pool for our use.
   292  * rnd_init() must be called very early on in the boot process, so
   293  * the pool is ready for other devices to attach as sources.
   294  */
   295 void
   296 rnd_init(void)
   297 {
   298         u_int32_t c;
   299 
   300         if (rnd_ready)
   301                 return;         /* idempotent: only the first call initializes */
   302 
   303         mutex_init(&rnd_mtx, MUTEX_DEFAULT, IPL_VM);    /* sample-queue lock; taken from interrupt paths */
   304 
   305         callout_init(&rnd_callout, CALLOUT_MPSAFE);
   306 
   307         /*
   308          * take a counter early, hoping that there's some variance in
   309          * the following operations
   310          */
   311         c = rnd_counter();
   312 
   313         LIST_INIT(&rnd_sources);
   314         SIMPLEQ_INIT(&rnd_samples);
   315         selinit(&rnd_selq);
   316 
   317         rndpool_init(&rnd_pool);
   318         mutex_init(&rndpool_mtx, MUTEX_DEFAULT, IPL_SOFTCLOCK); /* pool processed from softclock timeout */
   319 
   320         /* Mix *something*, *anything* into the pool to help it get started.
   321          * However, it's not safe for rnd_counter() to call microtime() yet,
   322          * so on some platforms we might just end up with zeros anyway.
   323          * XXX more things to add would be nice.
   324          */
   325         if (c) {
   326                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
   327                 c = rnd_counter();      /* a second reading for a little more variance */
   328                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
   329         }
   330 
   331         rnd_ready = 1;
   332 
   333 #ifdef RND_VERBOSE
   334         printf("rnd: initialised (%u)%s", RND_POOLBITS,
   335                c ? " with counter\n" : "\n");
   336 #endif
   337 }
  338 
   339 int
   340 rndopen(dev_t dev, int flags, int ifmt,
   341     struct lwp *l)
   342 {
   343 
   344         if (rnd_ready == 0)
   345                 return (ENXIO);         /* not usable until rnd_init() has run */
   346 
   347         if (minor(dev) == RND_DEV_URANDOM || minor(dev) == RND_DEV_RANDOM)
   348                 return (0);             /* only the random/urandom minors are valid */
   349 
   350         return (ENXIO);
   351 }
  352 
   353 int
   354 rndread(dev_t dev, struct uio *uio, int ioflag)
   355 {
   356         u_int8_t *bf;
   357         u_int32_t entcnt, mode, n, nread;
   358         int ret;
   359 
   360         DPRINTF(RND_DEBUG_READ,
   361             ("Random:  Read of %d requested, flags 0x%08x\n",
   362             uio->uio_resid, ioflag));
   363 
   364         if (uio->uio_resid == 0)
   365                 return (0);
   366 
   367         switch (minor(dev)) {
   368         case RND_DEV_RANDOM:
   369                 mode = RND_EXTRACT_GOOD;        /* may block waiting for entropy */
   370                 break;
   371         case RND_DEV_URANDOM:
   372                 mode = RND_EXTRACT_ANY;         /* never blocks */
   373                 break;
   374         default:
   375                 /* Can't happen, but this is cheap */
   376                 return (ENXIO);
   377         }
   378 
   379         ret = 0;
   380 
   381         bf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);
   382 
   383         while (uio->uio_resid > 0) {
   384                 n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);
   385 
   386                 /*
   387                  * Make certain there is data available.  If there
   388                  * is, do the I/O even if it is partial.  If not,
   389                  * sleep unless the user has requested non-blocking
   390                  * I/O.
   391                  */
   392                 for (;;) {
   393                         /*
   394                          * If not requesting strong randomness, we
   395                          * can always read.
   396                          */
   397                         if (mode == RND_EXTRACT_ANY)
   398                                 break;
   399 
   400                         /*
   401                          * How much entropy do we have?  If it is enough for
   402                          * one hash, we can read.
   403                          */
   404                         mutex_enter(&rndpool_mtx);
   405                         entcnt = rndpool_get_entropy_count(&rnd_pool);
   406                         mutex_exit(&rndpool_mtx);
   407                         if (entcnt >= RND_ENTROPY_THRESHOLD * 8)        /* count is in bits */
   408                                 break;
   409 
   410                         /*
   411                          * Data is not available.
   412                          */
   413                         if (ioflag & IO_NDELAY) {
   414                                 ret = EWOULDBLOCK;      /* non-blocking read, nothing ready */
   415                                 goto out;
   416                         }
   417 
   418                         rnd_status |= RND_READWAITING;
   419                         ret = tsleep(&rnd_selq, PRIBIO|PCATCH,
   420                             "rndread", 0);      /* PCATCH: interruptible by signals */
   421 
   422                         if (ret)
   423                                 goto out;
   424                 }
   425 
   426                 nread = rnd_extract_data(bf, n, mode);
   427 
   428                 /*
   429                  * Copy (possibly partial) data to the user.
   430                  * If an error occurs, or this is a partial
   431                  * read, bail out.
   432                  */
   433                 ret = uiomove((void *)bf, nread, uio);
   434                 if (ret != 0 || nread != n)
   435                         goto out;
   436         }
   437 
   438 out:
   439         free(bf, M_TEMP);
   440         return (ret);
   441 }
  442 
   443 int
   444 rndwrite(dev_t dev, struct uio *uio, int ioflag)
   445 {
   446         u_int8_t *bf;
   447         int n, ret;
   448 
   449         DPRINTF(RND_DEBUG_WRITE,
   450             ("Random: Write of %d requested\n", uio->uio_resid));
   451 
   452         if (uio->uio_resid == 0)
   453                 return (0);
   454 
   455         ret = 0;
   456 
   457         bf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);
   458 
   459         while (uio->uio_resid > 0) {
   460                 n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);
   461 
   462                 ret = uiomove((void *)bf, n, uio);
   463                 if (ret != 0)
   464                         break;
   465 
   466                 /*
   467                  * Mix in the bytes.
   468                  */
   469                 mutex_enter(&rndpool_mtx);
   470                 rndpool_add_data(&rnd_pool, bf, n, 0);  /* user data mixed in, but credited zero entropy */
   471                 mutex_exit(&rndpool_mtx);
   472 
   473                 DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
   474         }
   475 
   476         free(bf, M_TEMP);
   477         return (ret);
   478 }
  479 
   480 int
   481 rndioctl(dev_t dev, u_long cmd, void *addr, int flag,
   482     struct lwp *l)
   483 {
   484         rndsource_element_t *rse;
   485         rndstat_t *rst;
   486         rndstat_name_t *rstnm;
   487         rndctl_t *rctl;
   488         rnddata_t *rnddata;
   489         u_int32_t count, start;
   490         int ret;
   491 
   492         ret = 0;
   493 
   494         switch (cmd) {
   495         case FIONBIO:
   496         case FIOASYNC:
   497         case RNDGETENTCNT:
   498                 break;          /* these require no special privilege */
   499         case RNDGETPOOLSTAT:
   500         case RNDGETSRCNUM:
   501         case RNDGETSRCNAME:
   502         case RNDCTL:
   503         case RNDADDDATA:
   504                 ret = kauth_authorize_generic(l->l_cred, KAUTH_GENERIC_ISSUSER,
   505                     NULL);      /* stat/control ioctls need superuser credentials */
   506                 if (ret)
   507                         return (ret);
   508                 break;
   509         default:
   510                 return (EINVAL);
   511         }
   512 
   513         switch (cmd) {
   514 
   515         /*
   516          * Handled in upper layer really, but we have to return zero
   517          * for it to be accepted by the upper layer.
   518          */
   519         case FIONBIO:
   520         case FIOASYNC:
   521                 break;
   522 
   523         case RNDGETENTCNT:
   524                 mutex_enter(&rndpool_mtx);
   525                 *(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
   526                 mutex_exit(&rndpool_mtx);
   527                 break;
   528 
   529         case RNDGETPOOLSTAT:
   530                 mutex_enter(&rndpool_mtx);
   531                 rndpool_get_stats(&rnd_pool, addr, sizeof(rndpoolstat_t));
   532                 mutex_exit(&rndpool_mtx);
   533                 break;
   534 
   535         case RNDGETSRCNUM:
   536                 rst = (rndstat_t *)addr;
   537 
   538                 if (rst->count == 0)
   539                         break;
   540 
   541                 if (rst->count > RND_MAXSTATCOUNT)
   542                         return (EINVAL);        /* cap how much we copy out per call */
   543 
   544                 /*
   545                  * Find the starting source by running through the
   546                  * list of sources.
   547                  */
   548                 rse = rnd_sources.lh_first;
   549                 start = rst->start;
   550                 while (rse != NULL && start >= 1) {
   551                         rse = rse->list.le_next;
   552                         start--;
   53                 }
   554 
   555                 /*
   556                  * Return up to as many structures as the user asked
   557                  * for.  If we run out of sources, a count of zero
   558                  * will be returned, without an error.
   559                  */
   560                 for (count = 0; count < rst->count && rse != NULL; count++) {
   561                         memcpy(&rst->source[count], &rse->data,
   562                             sizeof(rndsource_t));
   563                         /* Zero out information which may leak */
   564                         rst->source[count].last_time = 0;
   565                         rst->source[count].last_delta = 0;
   566                         rst->source[count].last_delta2 = 0;
   567                         rst->source[count].state = 0;
   568                         rse = rse->list.le_next;
   569                 }
   570 
   571                 rst->count = count;     /* tell the caller how many we actually returned */
   572 
   573                 break;
   574 
   575         case RNDGETSRCNAME:
   576                 /*
   577                  * Scan through the list, trying to find the name.
   578                  */
   579                 rstnm = (rndstat_name_t *)addr;
   580                 rse = rnd_sources.lh_first;
   581                 while (rse != NULL) {
   582                         if (strncmp(rse->data.name, rstnm->name, 16) == 0) {    /* 16 = width of the name field */
   583                                 memcpy(&rstnm->source, &rse->data,
   584                                     sizeof(rndsource_t));
   585 
   586                                 return (0);
   587                         }
   588                         rse = rse->list.le_next;
   589                 }
   590 
   591                 ret = ENOENT;           /* name not found */
   592 
   593                 break;
   594 
   595         case RNDCTL:
   596                 /*
   597                  * Set flags to enable/disable entropy counting and/or
   598                  * collection.
   599                  */
   600                 rctl = (rndctl_t *)addr;
   601                 rse = rnd_sources.lh_first;
   602 
   603                 /*
   604                  * Flags set apply to all sources of this type.
   605                  */
   606                 if (rctl->type != 0xff) {       /* type 0xff means: select the source by name instead */
   607                         while (rse != NULL) {
   608                                 if (rse->data.type == rctl->type) {
   609                                         rse->data.flags &= ~rctl->mask;
   610                                         rse->data.flags |=
   611                                             (rctl->flags & rctl->mask);
   612                                 }
   613                                 rse = rse->list.le_next;
   614                         }
   615 
   616                         return (0);
   617                 }
   618 
   619                 /*
   620                  * scan through the list, trying to find the name
   621                  */
   622                 while (rse != NULL) {
   623                         if (strncmp(rse->data.name, rctl->name, 16) == 0) {
   624                                 rse->data.flags &= ~rctl->mask;
   625                                 rse->data.flags |= (rctl->flags & rctl->mask);
   626 
   627                                 return (0);
   628                         }
   629                         rse = rse->list.le_next;
   630                 }
   631 
   632                 ret = ENOENT;           /* name not found */
   633 
   634                 break;
   635 
   636         case RNDADDDATA:
   637                 rnddata = (rnddata_t *)addr;
   638 
   639                 if (rnddata->len > sizeof(rnddata->data))
   640                         return E2BIG;   /* reject bogus length from userland */
   641 
   642                 mutex_enter(&rndpool_mtx);
   643                 rndpool_add_data(&rnd_pool, rnddata->data, rnddata->len,
   644                     rnddata->entropy);  /* caller supplies its own entropy estimate */
   645                 mutex_exit(&rndpool_mtx);
   646 
   647                 rnd_wakeup_readers();
   648 
   649                 break;
   650 
   651         default:
   652                 return (EINVAL);
   653         }
   654 
   655         return (ret);
   656 }
  657 
   658 int
   659 rndpoll(dev_t dev, int events, struct lwp *l)
   660 {
   661         u_int32_t entcnt;
   662         int revents;
   663 
   664         /*
   665          * We are always writable.
   666          */
   667         revents = events & (POLLOUT | POLLWRNORM);
   668 
   669         /*
   670          * Save some work if not checking for reads.
   671          */
   672         if ((events & (POLLIN | POLLRDNORM)) == 0)
   673                 return (revents);
   674 
   675         /*
   676          * If the minor device is not /dev/random, we are always readable.
   677          */
   678         if (minor(dev) != RND_DEV_RANDOM) {
   679                 revents |= events & (POLLIN | POLLRDNORM);
   680                 return (revents);
   681         }
   682 
   683         /*
   684          * Make certain we have enough entropy to be readable.
   685          */
   686         mutex_enter(&rndpool_mtx);
   687         entcnt = rndpool_get_entropy_count(&rnd_pool);
   688         mutex_exit(&rndpool_mtx);
   689 
   690         if (entcnt >= RND_ENTROPY_THRESHOLD * 8)        /* count is in bits */
   691                 revents |= events & (POLLIN | POLLRDNORM);
   692         else
   693                 selrecord(l, &rnd_selq);        /* register so rnd_wakeup_readers() can notify us */
   694 
   695         return (revents);
   696 }
  697 
   698 static void
   699 filt_rnddetach(struct knote *kn)
   700 {
   701         mutex_enter(&rndpool_mtx);      /* rndpool_mtx also guards the klist (see rndkqfilter) */
   702         SLIST_REMOVE(&rnd_selq.sel_klist, kn, knote, kn_selnext);
   703         mutex_exit(&rndpool_mtx);
   704 }
  705 
   706 static int
   707 filt_rndread(struct knote *kn, long hint)
   708 {
   709         uint32_t entcnt;
   710 
   711         entcnt = rndpool_get_entropy_count(&rnd_pool);  /* NOTE(review): read without rndpool_mtx, unlike rndread/rndpoll — presumably a tolerated race; confirm */
   712         if (entcnt >= RND_ENTROPY_THRESHOLD * 8) {      /* count is in bits */
   713                 kn->kn_data = RND_TEMP_BUFFER_SIZE;
   714                 return (1);
   715         }
   716         return (0);
   717 }
  718 
  719 static const struct filterops rnd_seltrue_filtops =
  720         { 1, NULL, filt_rnddetach, filt_seltrue };
  721 
  722 static const struct filterops rndread_filtops =
  723         { 1, NULL, filt_rnddetach, filt_rndread };
  724 
   725 int
   726 rndkqfilter(dev_t dev, struct knote *kn)
   727 {
   728         struct klist *klist;
   729 
   730         switch (kn->kn_filter) {
   731         case EVFILT_READ:
   732                 klist = &rnd_selq.sel_klist;
   733                 if (minor(dev) == RND_DEV_URANDOM)
   734                         kn->kn_fop = &rnd_seltrue_filtops;      /* urandom: always readable */
   735                 else
   736                         kn->kn_fop = &rndread_filtops;          /* random: readable only with enough entropy */
   737                 break;
   738 
   739         case EVFILT_WRITE:
   740                 klist = &rnd_selq.sel_klist;
   741                 kn->kn_fop = &rnd_seltrue_filtops;      /* always writable */
   742                 break;
   743 
   744         default:
   745                 return (EINVAL);
   746         }
   747 
   748         kn->kn_hook = NULL;
   749 
   750         mutex_enter(&rndpool_mtx);      /* rndpool_mtx guards the klist */
   751         SLIST_INSERT_HEAD(klist, kn, kn_selnext);
   752         mutex_exit(&rndpool_mtx);
   753 
   754         return (0);
   755 }
  756 
   757 static rnd_sample_t *
   758 rnd_sample_allocate(rndsource_t *source)
   759 {
   760         rnd_sample_t *c;
   761 
   762         c = pool_get(&rnd_mempool, PR_WAITOK);  /* may sleep; NULL check below is defensive */
   763         if (c == NULL)
   764                 return (NULL);
   765 
   766         c->source = source;
   767         c->cursor = 0;          /* fresh buffer: no samples collected yet */
   768         c->entropy = 0;
   769 
   770         return (c);
   771 }
  772 
   773 /*
   774  * Don't wait on allocation.  To be used in an interrupt context.
   775  */
   776 static rnd_sample_t *
   777 rnd_sample_allocate_isr(rndsource_t *source)
   778 {
   779         rnd_sample_t *c;
   780 
   781         c = pool_get(&rnd_mempool, PR_NOWAIT);  /* never sleeps; caller must cope with NULL */
   782         if (c == NULL)
   783                 return (NULL);
   784 
   785         c->source = source;
   786         c->cursor = 0;          /* fresh buffer: no samples collected yet */
   787         c->entropy = 0;
   788 
   789         return (c);
   790 }
  791 
   792 static void
   793 rnd_sample_free(rnd_sample_t *c)
   794 {
   795         memset(c, 0, sizeof(rnd_sample_t));     /* scrub sample contents before returning to the shared pool */
   796         pool_put(&rnd_mempool, c);
   797 }
  798 
   799 /*
   800  * Add a source to our list of sources.
   801  */
   802 void
   803 rnd_attach_source(rndsource_element_t *rs, const char *name, u_int32_t type,
   804     u_int32_t flags)
   805 {
   806         u_int32_t ts;
   807 
   808         ts = rnd_counter();     /* timestamp the attach itself as a little entropy */
   809 
   810         strlcpy(rs->data.name, name, sizeof(rs->data.name));    /* always NUL-terminated */
   811         rs->data.last_time = ts;
   812         rs->data.last_delta = 0;
   813         rs->data.last_delta2 = 0;
   814         rs->data.total = 0;
   815 
   816         /*
   817          * Force network devices to not collect any entropy by
   818          * default.
   819          */
   820         if (type == RND_TYPE_NET)
   821                 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
   822 
   823         rs->data.type = type;
   824         rs->data.flags = flags;
   825 
   826         rs->data.state = rnd_sample_allocate(&rs->data);        /* pre-allocate the first sample buffer */
   827 
   828         LIST_INSERT_HEAD(&rnd_sources, rs, list);
   829 
   830 #ifdef RND_VERBOSE
   831         printf("rnd: %s attached as an entropy source (", rs->data.name);
   832         if (!(flags & RND_FLAG_NO_COLLECT)) {
   833                 printf("collecting");
   834                 if (flags & RND_FLAG_NO_ESTIMATE)
   835                         printf(" without estimation");
   836         }
   837         else
   838                 printf("off");
   839         printf(")\n");
   840 #endif
   841 
   842         /*
   843          * Again, put some more initial junk in the pool.
   844          * XXX Bogus, but harder to guess than zeros.
   845          * NOTE(review): pool updated without rndpool_mtx here, unlike
   846          * rndwrite/rndioctl — presumably attach runs before concurrent
   847          * access is possible; confirm.
   848          */
   849         rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1);
   850 }
  848 
   849 /*
   850  * Remove a source from our list of sources.
   851  */
   852 void
   853 rnd_detach_source(rndsource_element_t *rs)
   854 {
   855         rnd_sample_t *sample;
   856         rndsource_t *source;
   857 
   858         mutex_enter(&rnd_mtx);  /* sample queue and source list walked under the queue lock */
   859 
   860         LIST_REMOVE(rs, list);
   861 
   862         source = &rs->data;
   863 
   864         if (source->state) {
   865                 rnd_sample_free(source->state);         /* discard the partially-filled collection buffer */
   866                 source->state = NULL;
   867         }
   868 
   869         /*
   870          * If there are samples queued up "remove" them from the sample queue
   871          * by setting the source to the no-collect pseudosource.
   872          */
   873         sample = SIMPLEQ_FIRST(&rnd_samples);
   874         while (sample != NULL) {
   875                 if (sample->source == source)
   876                         sample->source = &rnd_source_no_collect;        /* keeps queued samples from referencing freed state */
   877 
   878                 sample = SIMPLEQ_NEXT(sample, next);
   879         }
   880 
   881         mutex_exit(&rnd_mtx);
   882 #ifdef RND_VERBOSE
   883         printf("rnd: %s detached as an entropy source\n", rs->data.name);
   884 #endif
   885 }
  886 
/*
 * Add a value to the entropy pool. The rs parameter should point to the
 * source-specific source structure.
 *
 * Samples are staged lock-free in a per-source buffer; only when the
 * buffer fills (RND_SAMPLE_COUNT entries) is it queued -- under
 * rnd_mtx -- for the callout-driven rnd_timeout() to mix into the
 * pool.  The *_isr allocator and callout deferral suggest this may run
 * in interrupt context -- TODO confirm against callers.
 */
void
rnd_add_uint32(rndsource_element_t *rs, u_int32_t val)
{
	rndsource_t *rst;
	rnd_sample_t *state;
	u_int32_t ts;


	rst = &rs->data;

	/* Sources flagged no-collect contribute nothing. */
	if (rst->flags & RND_FLAG_NO_COLLECT)
		return;

	/*
	 * Sample the counter as soon as possible to avoid
	 * entropy overestimation.
	 */
	ts = rnd_counter();

	/*
	 * If the sample buffer is NULL, try to allocate one here.  If this
	 * fails, drop this sample.
	 */
	state = rst->state;
	if (state == NULL) {
		state = rnd_sample_allocate_isr(rst);
		if (state == NULL)
			return;
		rst->state = state;
	}

	/*
	 * If we are estimating entropy on this source,
	 * calculate differentials.
	 */

	if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
		state->entropy += rnd_estimate_entropy(rst, ts);

	/* Record timestamp and value side by side in the staging arrays. */
	state->ts[state->cursor] = ts;
	state->values[state->cursor] = val;
	state->cursor++;

	/*
	 * If the state arrays are not full, we're done.
	 */
	if (state->cursor < RND_SAMPLE_COUNT)
		return;

	/*
	 * State arrays are full.  Queue this chunk on the processing queue.
	 */
	mutex_enter(&rnd_mtx);
	SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
	rst->state = NULL;

	/*
	 * If the timeout isn't pending, have it run in the near future.
	 */
	if (rnd_timeout_pending == 0) {
		rnd_timeout_pending = 1;
		callout_reset(&rnd_callout, 1, rnd_timeout, NULL);
	}
	mutex_exit(&rnd_mtx);

	/*
	 * To get here we have to have queued the state up, and therefore
	 * we need a new state buffer.  If we can, allocate one now;
	 * if we don't get it, it doesn't matter; we'll try again on
	 * the next random event.
	 */
	rst->state = rnd_sample_allocate_isr(rst);
}
  964 
  965 void
  966 rnd_add_data(rndsource_element_t *rs, void *data, u_int32_t len,
  967     u_int32_t entropy)
  968 {
  969         rndsource_t *rst;
  970 
  971         /* Mix in the random data directly into the pool. */
  972         rndpool_add_data(&rnd_pool, data, len, entropy);
  973 
  974         if (rs != NULL) {
  975                 rst = &rs->data;
  976                 rst->total += entropy;
  977 
  978                 if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
  979                         /* Estimate entropy using timing information */
  980                         rnd_add_uint32(rs, *(u_int8_t *)data);
  981         }
  982 
  983         /* Wake up any potential readers since we've just added some data. */
  984         rnd_wakeup_readers();
  985 }
  986 
/*
 * Timeout, run to process the events in the ring buffer.
 *
 * Drains the queue of full sample buffers, mixing each buffer's values
 * and timestamps into the pool.  Note the lock dance: rnd_mtx is held
 * only long enough to dequeue one sample, then dropped while the pool
 * mixing runs under rndpool_mtx, so producers are not held off during
 * the expensive part.
 */
static void
rnd_timeout(void *arg)
{
	rnd_sample_t *sample;
	rndsource_t *source;
	u_int32_t entropy;

	/*
	 * Sample queue is protected by rnd_mtx, take it briefly to dequeue.
	 */
	mutex_enter(&rnd_mtx);
	/* Cleared before draining: a concurrent producer may re-arm us. */
	rnd_timeout_pending = 0;

	sample = SIMPLEQ_FIRST(&rnd_samples);
	while (sample != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rnd_samples, next);
		mutex_exit(&rnd_mtx);

		source = sample->source;

		/*
		 * We repeat this check here, since it is possible the source
		 * was disabled before we were called, but after the entry
		 * was queued.
		 */
		if ((source->flags & RND_FLAG_NO_COLLECT) == 0) {
			/* Zero the entropy credit for no-estimate sources. */
			entropy = sample->entropy;
			if (source->flags & RND_FLAG_NO_ESTIMATE)
				entropy = 0;

			/*
			 * Values are mixed with zero credit; the entropy
			 * credit is attached to the timestamp mix only.
			 */
			mutex_enter(&rndpool_mtx);
			rndpool_add_data(&rnd_pool, sample->values,
			    RND_SAMPLE_COUNT * 4, 0);

			rndpool_add_data(&rnd_pool, sample->ts,
			    RND_SAMPLE_COUNT * 4,
			    entropy);
			mutex_exit(&rndpool_mtx);

			/*
			 * NOTE(review): total is credited the raw
			 * sample->entropy even when the pool credit above
			 * was zeroed by RND_FLAG_NO_ESTIMATE -- confirm
			 * this asymmetry is intentional.
			 */
			source->total += sample->entropy;
		}

		rnd_sample_free(sample);

		/* Get mtx back to dequeue the next one.. */
		mutex_enter(&rnd_mtx);
		sample = SIMPLEQ_FIRST(&rnd_samples);
	}
	mutex_exit(&rnd_mtx);

	/*
	 * Wake up any potential readers waiting.
	 */
	rnd_wakeup_readers();
}
 1045 
 1046 u_int32_t
 1047 rnd_extract_data(void *p, u_int32_t len, u_int32_t flags)
 1048 {
 1049         int retval;
 1050         u_int32_t c;
 1051 
 1052         mutex_enter(&rndpool_mtx);
 1053         if (!rnd_have_entropy) {
 1054 #ifdef RND_VERBOSE
 1055                 printf("rnd: WARNING! initial entropy low (%u).\n",
 1056                        rndpool_get_entropy_count(&rnd_pool));
 1057 #endif
 1058                 /* Try once again to put something in the pool */
 1059                 c = rnd_counter();
 1060                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
 1061         }
 1062         retval = rndpool_extract_data(&rnd_pool, p, len, flags);
 1063         mutex_exit(&rndpool_mtx);
 1064 
 1065         return (retval);
 1066 }

Cache object: 3bb5f4b854565370d2a0e3f420d0b014


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.