FreeBSD/Linux Kernel Cross Reference
sys/dev/rnd.c


    1 /*      $NetBSD: rnd.c,v 1.42 2003/06/29 22:30:01 fvdl Exp $    */
    2 
    3 /*-
    4  * Copyright (c) 1997 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Michael Graff <explorer@flame.org>.  This code uses ideas and
    9  * algorithms from the Linux driver written by Ted Ts'o.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  * 3. All advertising materials mentioning features or use of this software
   20  *    must display the following acknowledgement:
   21  *        This product includes software developed by the NetBSD
   22  *        Foundation, Inc. and its contributors.
   23  * 4. Neither the name of The NetBSD Foundation nor the names of its
   24  *    contributors may be used to endorse or promote products derived
   25  *    from this software without specific prior written permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   37  * POSSIBILITY OF SUCH DAMAGE.
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __KERNEL_RCSID(0, "$NetBSD: rnd.c,v 1.42 2003/06/29 22:30:01 fvdl Exp $");
   42 
   43 #include <sys/param.h>
   44 #include <sys/ioctl.h>
   45 #include <sys/fcntl.h>
   46 #include <sys/select.h>
   47 #include <sys/poll.h>
   48 #include <sys/malloc.h>
   49 #include <sys/proc.h>
   50 #include <sys/kernel.h>
   51 #include <sys/conf.h>
   52 #include <sys/systm.h>
   53 #include <sys/callout.h>
   54 #include <sys/rnd.h>
   55 #include <sys/vnode.h>
   56 #include <sys/pool.h>
   57 
   58 #ifdef __HAVE_CPU_COUNTER
   59 #include <machine/cpu_counter.h>
   60 #endif
   61 
   62 #ifdef RND_DEBUG
   63 #define DPRINTF(l,x)      if (rnd_debug & (l)) printf x
   64 int     rnd_debug = 0;
   65 #else
   66 #define DPRINTF(l,x)
   67 #endif
   68 
   69 #define RND_DEBUG_WRITE         0x0001
   70 #define RND_DEBUG_READ          0x0002
   71 #define RND_DEBUG_IOCTL         0x0004
   72 #define RND_DEBUG_SNOOZE        0x0008
   73 
   74 /*
   75  * Define RND_VERBOSE to log source attach/detach and pool status messages.
   76  */
   77 #if 0
   78 #define RND_VERBOSE
   79 #endif
   80 
   81 /*
   82  * Use the extraction time as a somewhat-random source
   83  */
   84 #ifndef RND_USE_EXTRACT_TIME
   85 #define RND_USE_EXTRACT_TIME 1
   86 #endif
   87 
   88 /*
   89  * The size of a temporary buffer, malloc()ed when needed, and used for
   90  * reading and writing data.
   91  */
   92 #define RND_TEMP_BUFFER_SIZE    128
   93 
   94 /*
   95  * This is a little bit of state information attached to each device that we
   96  * collect entropy from.  This is simply a collection buffer, and when it
   97  * is full it will be "detached" from the source and added to the entropy
   98  * pool after entropy is distilled as much as possible.
   99  */
  100 #define RND_SAMPLE_COUNT        64      /* collect N samples, then compress */
  101 typedef struct _rnd_sample_t {
  102         SIMPLEQ_ENTRY(_rnd_sample_t) next;
  103         rndsource_t     *source;
  104         int             cursor;
  105         int             entropy;
  106         u_int32_t       ts[RND_SAMPLE_COUNT];
  107         u_int32_t       values[RND_SAMPLE_COUNT];
  108 } rnd_sample_t;
  109 
  110 /*
  111  * The event queue.  Fields are altered at an interrupt level.
  112  * All accesses must be protected at splhigh().
  113  */
  114 volatile int                    rnd_timeout_pending;
  115 SIMPLEQ_HEAD(, _rnd_sample_t)   rnd_samples;
  116 
  117 /*
  118  * our select/poll queue
  119  */
  120 struct selinfo rnd_selq;
  121 
  122 /*
  123  * Set when there are readers blocking on data from us
  124  */
  125 #define RND_READWAITING 0x00000001
  126 volatile u_int32_t rnd_status;
  127 
  128 /*
  129  * Memory pool; accessed only at splhigh().
  130  */
  131 struct pool rnd_mempool;
  132 
  133 /*
  134  * Our random pool.  This is defined here rather than using the general
  135  * purpose one defined in rndpool.c.
  136  *
  137  * Samples are collected and queued at splhigh() into a separate queue
  138  * (rnd_samples, see above), and processed in a timeout routine; therefore,
  139  * all other accesses to the random pool must be at splsoftclock() as well.
  140  */
  141 rndpool_t rnd_pool;
  142 
  143 /*
  144  * This source is used to easily "remove" queue entries when the source
  145  * which actually generated the events is going away.
  146  */
  147 static rndsource_t rnd_source_no_collect = {
  148         { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 0, 0, 0, 0, 0, 0, 0 },
  149         0, 0, 0, 0,
  150         RND_TYPE_UNKNOWN,
  151         (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE | RND_TYPE_UNKNOWN),
  152         NULL
  153 };
  154 
  155 struct callout rnd_callout = CALLOUT_INITIALIZER;
  156 
  157 void    rndattach __P((int));
  158 
  159 dev_type_open(rndopen);
  160 dev_type_read(rndread);
  161 dev_type_write(rndwrite);
  162 dev_type_ioctl(rndioctl);
  163 dev_type_poll(rndpoll);
  164 dev_type_kqfilter(rndkqfilter);
  165 
  166 const struct cdevsw rnd_cdevsw = {
  167         rndopen, nullclose, rndread, rndwrite, rndioctl,
  168         nostop, notty, rndpoll, nommap, rndkqfilter,
  169 };
  170 
  171 static inline void      rnd_wakeup_readers(void);
  172 static inline u_int32_t rnd_estimate_entropy(rndsource_t *, u_int32_t);
  173 static inline u_int32_t rnd_counter(void);
  174 static        void      rnd_timeout(void *);
  175 
  176 static int              rnd_ready = 0;
  177 static int              rnd_have_entropy = 0;
  178 
  179 LIST_HEAD(, __rndsource_element)        rnd_sources;
  180 
  181 /*
  182  * Generate a 32-bit counter.  This should be more machine dependent,
  183  * using cycle counters and the like when possible.
  184  */
  185 static inline u_int32_t
  186 rnd_counter(void)
  187 {
  188         struct timeval tv;
  189 
  190 #ifdef __HAVE_CPU_COUNTER
  191         if (cpu_hascounter())
  192                 return (cpu_counter32());
  193 #endif
  194         if (rnd_ready) {
  195                 microtime(&tv);
  196                 return (tv.tv_sec * 1000000 + tv.tv_usec);
  197         } 
  198         /* when called from rnd_init, it's too early to call microtime safely */
  199         return (0);
  200 }
  201 
  202 /*
  203  * Check to see if there are readers waiting on us.  If so, kick them.
  204  *
  205  * Must be called at splsoftclock().
  206  */
  207 static inline void
  208 rnd_wakeup_readers(void)
  209 {
  210 
  211         /*
  212          * If we have added new bits, and now have enough to do something,
  213          * wake up sleeping readers.
  214          */
  215         if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {
  216                 if (rnd_status & RND_READWAITING) {
  217                         DPRINTF(RND_DEBUG_SNOOZE,
  218                             ("waking up pending readers.\n"));
  219                         rnd_status &= ~RND_READWAITING;
  220                         wakeup(&rnd_selq);
  221                 }
  222                 selnotify(&rnd_selq, 0);
  223 
  224 #ifdef RND_VERBOSE
  225                 if (!rnd_have_entropy)
  226                         printf("rnd: have initial entropy (%u)\n",
  227                                rndpool_get_entropy_count(&rnd_pool));
  228 #endif
  229                 /*
  230                  * Allow open of /dev/random now, too.
  231                  */
  232                 rnd_have_entropy = 1;
  233         }
  234 }
  235 
  236 /*
  237  * Use the timing of the event to estimate the entropy gathered.
  238  * If all the differentials (first, second, and third) are non-zero, return
  239  * non-zero.  If any of these are zero, return zero.
  240  */
  241 static inline u_int32_t
  242 rnd_estimate_entropy(rndsource_t *rs, u_int32_t t)
  243 {
  244         int32_t delta, delta2, delta3;
  245 
  246         /*
  247          * If the time counter has overflowed, calculate the real difference.
  248          * If it has not, it is simpler.
  249          */
  250         if (t < rs->last_time)
  251                 delta = UINT_MAX - rs->last_time + t;
  252         else
  253                 delta = rs->last_time - t;
  254 
  255         if (delta < 0)
  256                 delta = -delta;
  257 
  258         /*
  259          * Calculate the second and third order differentials
  260          */
  261         delta2 = rs->last_delta - delta;
  262         if (delta2 < 0)
  263                 delta2 = -delta2;
  264 
  265         delta3 = rs->last_delta2 - delta2;
  266         if (delta3 < 0)
  267                 delta3 = -delta3;
  268 
  269         rs->last_time = t;
  270         rs->last_delta = delta;
  271         rs->last_delta2 = delta2;
  272 
  273         /*
  274          * If any delta is 0, we got no entropy.  If all are non-zero, we
  275          * might have something.
  276          */
  277         if (delta == 0 || delta2 == 0 || delta3 == 0)
  278                 return (0);
  279 
  280         return (1);
  281 }
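
The differential test above is easiest to see with concrete numbers. The following is a small, self-contained userland sketch (not part of this driver) that reimplements the same first/second/third-order delta logic on a stand-in structure and feeds it perfectly periodic timestamps versus irregular ones; the demo_src/demo_estimate names and the sample intervals are purely illustrative.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the timing fields of rndsource_t. */
struct demo_src {
	uint32_t last_time;
	int32_t  last_delta;
	int32_t  last_delta2;
};

/* Same first/second/third-order differential test as rnd_estimate_entropy(). */
static uint32_t
demo_estimate(struct demo_src *rs, uint32_t t)
{
	int32_t delta, delta2, delta3;

	if (t < rs->last_time)
		delta = UINT_MAX - rs->last_time + t;
	else
		delta = rs->last_time - t;
	if (delta < 0)
		delta = -delta;

	delta2 = rs->last_delta - delta;
	if (delta2 < 0)
		delta2 = -delta2;

	delta3 = rs->last_delta2 - delta2;
	if (delta3 < 0)
		delta3 = -delta3;

	rs->last_time = t;
	rs->last_delta = delta;
	rs->last_delta2 = delta2;

	return (delta == 0 || delta2 == 0 || delta3 == 0) ? 0 : 1;
}

int
main(void)
{
	struct demo_src periodic = { 0, 0, 0 }, jittery = { 0, 0, 0 };
	uint32_t jitter[8] = { 997, 1031, 1009, 1063, 989, 1051, 1013, 1087 };
	uint32_t t, bits_p = 0, bits_j = 0;
	int i;

	/* Evenly spaced events: delta2 is 0 from the second sample on. */
	for (i = 0, t = 0; i < 8; i++)
		bits_p += demo_estimate(&periodic, t += 1000);

	/* Irregularly spaced events: all three deltas stay non-zero. */
	for (i = 0, t = 0; i < 8; i++)
		bits_j += demo_estimate(&jittery, t += jitter[i]);

	/* Prints "periodic: 1 bit(s) credited, jittery: 8". */
	printf("periodic: %u bit(s) credited, jittery: %u\n", bits_p, bits_j);
	return 0;
}

With evenly spaced events the second-order delta collapses to zero from the second sample onward, so at most one bit is ever credited; the irregular stream keeps all three deltas non-zero and is credited one bit per sample.
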
  282 
  283 /*
  284  * "Attach" the random device. This is an (almost) empty stub, since
  285  * pseudo-devices don't get attached until after config, by which time the
  286  * entropy sources have already attached. We just use the timing of this event
  287  * as another potential source of initial entropy.
  288  */
  289 void
  290 rndattach(int num)
  291 {
  292         u_int32_t c;
  293 
  294         /* Trap unwary players who don't call rnd_init() early */
  295         KASSERT(rnd_ready);
  296 
  297         /* mix in another counter */
  298         c = rnd_counter();
  299         rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
  300 }
  301 
  302 /*
  303  * initialize the global random pool for our use.
  304  * rnd_init() must be called very early on in the boot process, so
  305  * the pool is ready for other devices to attach as sources.
  306  */
  307 void
  308 rnd_init(void)
  309 {
  310         u_int32_t c;
  311 
  312         if (rnd_ready)
  313                 return;
  314 
  315         /* 
  316          * take a counter early, hoping that there's some variance in
  317          * the following operations 
  318          */
  319         c = rnd_counter();
  320 
  321         LIST_INIT(&rnd_sources);
  322         SIMPLEQ_INIT(&rnd_samples);
  323 
  324         pool_init(&rnd_mempool, sizeof(rnd_sample_t), 0, 0, 0, "rndsample",
  325             NULL);
  326 
  327         rndpool_init(&rnd_pool);
  328 
  329         /* Mix *something*, *anything* into the pool to help it get started. 
  330          * However, it's not safe for rnd_counter() to call microtime() yet,
  331          * so on some platforms we might just end up with zeros anyway.
  332          * XXX more things to add would be nice.
  333          */ 
  334         if (c) {
  335                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
  336                 c = rnd_counter();
  337                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
  338         }
  339 
  340         rnd_ready = 1;
  341 
  342 #ifdef RND_VERBOSE
  343         printf("rnd: initialised (%u)%s", RND_POOLBITS,
  344                c ? " with counter\n" : "\n");
  345 #endif
  346 }
  347 
  348 int
  349 rndopen(dev_t dev, int flags, int ifmt, struct proc *p)
  350 {
  351 
  352         if (rnd_ready == 0)
  353                 return (ENXIO);
  354 
  355         if (minor(dev) == RND_DEV_URANDOM)
  356                 return (0);
  357 
  358         /*
  359          * If this is the strong random device and we have not yet
  360          * collected any entropy, don't allow it to be opened.  This will
  361          * prevent waiting forever for something that just will not appear.
  362          */
  363         if (minor(dev) == RND_DEV_RANDOM) {
  364                 if (rnd_have_entropy == 0)
  365                         return (ENXIO);
  366                 else
  367                         return (0);
  368         }
  369 
  370         return (ENXIO);
  371 }
  372 
  373 int
  374 rndread(dev_t dev, struct uio *uio, int ioflag)
  375 {
  376         u_int8_t *buf;
  377         u_int32_t entcnt, mode, n, nread;
  378         int ret, s;
  379 
  380         DPRINTF(RND_DEBUG_READ,
  381             ("Random:  Read of %d requested, flags 0x%08x\n",
  382             uio->uio_resid, ioflag));
  383 
  384         if (uio->uio_resid == 0)
  385                 return (0);
  386 
  387         switch (minor(dev)) {
  388         case RND_DEV_RANDOM:
  389                 mode = RND_EXTRACT_GOOD;
  390                 break;
  391         case RND_DEV_URANDOM:
  392                 mode = RND_EXTRACT_ANY;
  393                 break;
  394         default:
  395                 /* Can't happen, but this is cheap */
  396                 return (ENXIO);
  397         }
  398 
  399         ret = 0;
  400 
  401         buf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);
  402 
  403         while (uio->uio_resid > 0) {
  404                 n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);
  405 
  406                 /*
  407                  * Make certain there is data available.  If there
  408                  * is, do the I/O even if it is partial.  If not,
  409                  * sleep unless the user has requested non-blocking
  410                  * I/O.
  411                  */
  412                 for (;;) {
  413                         /*
  414                          * If not requesting strong randomness, we
  415                          * can always read.
  416                          */
  417                         if (mode == RND_EXTRACT_ANY)
  418                                 break;
  419 
  420                         /*
  421                          * How much entropy do we have?  If it is enough for
  422                          * one hash, we can read.
  423                          */
  424                         s = splsoftclock();
  425                         entcnt = rndpool_get_entropy_count(&rnd_pool);
  426                         splx(s);
  427                         if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
  428                                 break;
  429 
  430                         /*
  431                          * Data is not available.
  432                          */
  433                         if (ioflag & IO_NDELAY) {
  434                                 ret = EWOULDBLOCK;
  435                                 goto out;
  436                         }
  437 
  438                         rnd_status |= RND_READWAITING;
  439                         ret = tsleep(&rnd_selq, PRIBIO|PCATCH,
  440                             "rndread", 0);
  441 
  442                         if (ret)
  443                                 goto out;
  444                 }
  445 
  446                 nread = rnd_extract_data(buf, n, mode);
  447 
  448                 /*
  449                  * Copy (possibly partial) data to the user.
  450                  * If an error occurs, or this is a partial
  451                  * read, bail out.
  452                  */
  453                 ret = uiomove((caddr_t)buf, nread, uio);
  454                 if (ret != 0 || nread != n)
  455                         goto out;
  456         }
  457 
  458 out:
  459         free(buf, M_TEMP);
  460         return (ret);
  461 }
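
For reference, a minimal userland sketch (not part of this file) of the read-side behaviour implemented above: opening the strong device with O_NONBLOCK makes the IO_NDELAY branch return EWOULDBLOCK instead of sleeping when the pool holds too little estimated entropy. The /dev/random and /dev/urandom paths are assumed to be the device nodes for the RND_DEV_RANDOM and RND_DEV_URANDOM minors.

#include <sys/types.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	unsigned char buf[32];
	ssize_t n;
	int fd;

	/*
	 * O_NONBLOCK shows up as IO_NDELAY in rndread(); with too little
	 * pooled entropy the strong device then returns EWOULDBLOCK
	 * rather than sleeping in tsleep().
	 */
	fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
	if (fd == -1)
		err(1, "open /dev/random");

	n = read(fd, buf, sizeof(buf));
	if (n == -1 && errno == EWOULDBLOCK)
		printf("not enough entropy pooled yet; try /dev/urandom\n");
	else if (n == -1)
		err(1, "read");
	else
		printf("read %zd byte(s) of strong random data\n", n);

	close(fd);
	return 0;
}
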
  462 
  463 int
  464 rndwrite(dev_t dev, struct uio *uio, int ioflag)
  465 {
  466         u_int8_t *buf;
  467         int n, ret, s;
  468 
  469         DPRINTF(RND_DEBUG_WRITE,
  470             ("Random: Write of %d requested\n", uio->uio_resid));
  471 
  472         if (uio->uio_resid == 0)
  473                 return (0);
  474 
  475         ret = 0;
  476 
  477         buf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);
  478 
  479         while (uio->uio_resid > 0) {
  480                 n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);
  481 
  482                 ret = uiomove((caddr_t)buf, n, uio);
  483                 if (ret != 0)
  484                         break;
  485 
  486                 /*
  487                  * Mix in the bytes.
  488                  */
  489                 s = splsoftclock();
  490                 rndpool_add_data(&rnd_pool, buf, n, 0);
  491                 splx(s);
  492 
  493                 DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
  494         }
  495 
  496         free(buf, M_TEMP);
  497         return (ret);
  498 }
  499 
  500 int
  501 rndioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
  502 {
  503         rndsource_element_t *rse;
  504         rndstat_t *rst;
  505         rndstat_name_t *rstnm;
  506         rndctl_t *rctl;
  507         rnddata_t *rnddata;
  508         u_int32_t count, start;
  509         int ret, s;
  510 
  511         ret = 0;
  512 
  513         switch (cmd) {
  514 
  515         /*
  516          * Handled in upper layer really, but we have to return zero
  517          * for it to be accepted by the upper layer.
  518          */
  519         case FIONBIO:
  520         case FIOASYNC:
  521                 break;
  522 
  523         case RNDGETENTCNT:
  524                 s = splsoftclock();
  525                 *(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
  526                 splx(s);
  527                 break;
  528 
  529         case RNDGETPOOLSTAT:
  530                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  531                         return (ret);
  532 
  533                 s = splsoftclock();
  534                 rndpool_get_stats(&rnd_pool, addr, sizeof(rndpoolstat_t));
  535                 splx(s);
  536                 break;
  537 
  538         case RNDGETSRCNUM:
  539                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  540                         return (ret);
  541 
  542                 rst = (rndstat_t *)addr;
  543 
  544                 if (rst->count == 0)
  545                         break;
  546 
  547                 if (rst->count > RND_MAXSTATCOUNT)
  548                         return (EINVAL);
  549 
  550                 /*
  551                  * Find the starting source by running through the
  552                  * list of sources.
  553                  */
  554                 rse = rnd_sources.lh_first;
  555                 start = rst->start;
  556                 while (rse != NULL && start >= 1) {
  557                         rse = rse->list.le_next;
  558                         start--;
  559                 }
  560 
  561                 /*
  562                  * Return up to as many structures as the user asked
  563                  * for.  If we run out of sources, a count of zero
  564                  * will be returned, without an error.
  565                  */
  566                 for (count = 0; count < rst->count && rse != NULL; count++) {
  567                         memcpy(&rst->source[count], &rse->data,
  568                             sizeof(rndsource_t));
  569                         /* Zero out information which may leak */
  570                         rst->source[count].last_time = 0;
  571                         rst->source[count].last_delta = 0;
  572                         rst->source[count].last_delta2 = 0;
  573                         rst->source[count].state = 0;
  574                         rse = rse->list.le_next;
  575                 }
  576 
  577                 rst->count = count;
  578 
  579                 break;
  580 
  581         case RNDGETSRCNAME:
  582                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  583                         return (ret);
  584 
  585                 /*
  586                  * Scan through the list, trying to find the name.
  587                  */
  588                 rstnm = (rndstat_name_t *)addr;
  589                 rse = rnd_sources.lh_first;
  590                 while (rse != NULL) {
  591                         if (strncmp(rse->data.name, rstnm->name, 16) == 0) {
  592                                 memcpy(&rstnm->source, &rse->data,
  593                                     sizeof(rndsource_t));
  594 
  595                                 return (0);
  596                         }
  597                         rse = rse->list.le_next;
  598                 }
  599 
  600                 ret = ENOENT;           /* name not found */
  601 
  602                 break;
  603 
  604         case RNDCTL:
  605                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  606                         return (ret);
  607 
  608                 /*
  609                  * Set flags to enable/disable entropy counting and/or
  610                  * collection.
  611                  */
  612                 rctl = (rndctl_t *)addr;
  613                 rse = rnd_sources.lh_first;
  614 
  615                 /*
  616                  * Flags set apply to all sources of this type.
  617                  */
  618                 if (rctl->type != 0xff) {
  619                         while (rse != NULL) {
  620                                 if (rse->data.type == rctl->type) {
  621                                         rse->data.flags &= ~rctl->mask;
  622                                         rse->data.flags |=
  623                                             (rctl->flags & rctl->mask);
  624                                 }
  625                                 rse = rse->list.le_next;
  626                         }
  627 
  628                         return (0);
  629                 }
  630 
  631                 /*
  632                  * scan through the list, trying to find the name
  633                  */
  634                 while (rse != NULL) {
  635                         if (strncmp(rse->data.name, rctl->name, 16) == 0) {
  636                                 rse->data.flags &= ~rctl->mask;
  637                                 rse->data.flags |= (rctl->flags & rctl->mask);
  638 
  639                                 return (0);
  640                         }
  641                         rse = rse->list.le_next;
  642                 }
  643 
  644                 ret = ENOENT;           /* name not found */
  645 
  646                 break;
  647 
  648         case RNDADDDATA:
  649                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  650                         return (ret);
  651 
  652                 rnddata = (rnddata_t *)addr;
  653 
  654                 s = splsoftclock();
  655                 rndpool_add_data(&rnd_pool, rnddata->data, rnddata->len,
  656                     rnddata->entropy);
  657 
  658                 rnd_wakeup_readers();
  659                 splx(s);
  660 
  661                 break;
  662 
  663         default:
  664                 return (EINVAL);
  665         }
  666 
  667         return (ret);
  668 }
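
As a usage illustration (again, not part of this file), the unprivileged RNDGETENTCNT request handled above can be issued from userland roughly as follows. This assumes <sys/rnd.h> exposes the ioctl definitions to userland and that the count copied out is in bits, as the RND_ENTROPY_THRESHOLD * 8 comparisons elsewhere in this file suggest.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/rnd.h>		/* RNDGETENTCNT, serviced by rndioctl() above */
#include <err.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	u_int32_t entcnt;
	int fd;

	/*
	 * Either minor device accepts the ioctl; RNDGETENTCNT needs no
	 * privilege, unlike the statistics and control requests.
	 */
	fd = open("/dev/urandom", O_RDONLY);
	if (fd == -1)
		err(1, "open /dev/urandom");

	if (ioctl(fd, RNDGETENTCNT, &entcnt) == -1)
		err(1, "ioctl RNDGETENTCNT");

	printf("pool entropy estimate: %u bits\n", entcnt);

	close(fd);
	return 0;
}
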
  669 
  670 int
  671 rndpoll(dev_t dev, int events, struct proc *p)
  672 {
  673         u_int32_t entcnt;
  674         int revents, s;
  675 
  676         /*
  677          * We are always writable.
  678          */
  679         revents = events & (POLLOUT | POLLWRNORM);
  680 
  681         /*
  682          * Save some work if not checking for reads.
  683          */
  684         if ((events & (POLLIN | POLLRDNORM)) == 0)
  685                 return (revents);
  686 
  687         /*
  688          * If the minor device is not /dev/random, we are always readable.
  689          */
  690         if (minor(dev) != RND_DEV_RANDOM) {
  691                 revents |= events & (POLLIN | POLLRDNORM);
  692                 return (revents);
  693         }
  694 
  695         /*
  696          * Make certain we have enough entropy to be readable.
  697          */
  698         s = splsoftclock();
  699         entcnt = rndpool_get_entropy_count(&rnd_pool);
  700         splx(s);
  701 
  702         if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
  703                 revents |= events & (POLLIN | POLLRDNORM);
  704         else
  705                 selrecord(p, &rnd_selq);
  706 
  707         return (revents);
  708 }
  709 
  710 static void
  711 filt_rnddetach(struct knote *kn)
  712 {
  713         int s;
  714 
  715         s = splsoftclock();
  716         SLIST_REMOVE(&rnd_selq.sel_klist, kn, knote, kn_selnext);
  717         splx(s);
  718 }
  719 
  720 static int
  721 filt_rndread(struct knote *kn, long hint)
  722 {
  723         uint32_t entcnt;
  724 
  725         entcnt = rndpool_get_entropy_count(&rnd_pool);
  726         if (entcnt >= RND_ENTROPY_THRESHOLD * 8) {
  727                 kn->kn_data = RND_TEMP_BUFFER_SIZE;
  728                 return (1);
  729         }
  730         return (0);
  731 }
  732 
  733 static const struct filterops rnd_seltrue_filtops =
  734         { 1, NULL, filt_rnddetach, filt_seltrue };
  735 
  736 static const struct filterops rndread_filtops =
  737         { 1, NULL, filt_rnddetach, filt_rndread };
  738 
  739 int
  740 rndkqfilter(dev_t dev, struct knote *kn)
  741 {
  742         struct klist *klist;
  743         int s;
  744 
  745         switch (kn->kn_filter) {
  746         case EVFILT_READ:
  747                 klist = &rnd_selq.sel_klist;
  748                 if (minor(dev) == RND_DEV_URANDOM)
  749                         kn->kn_fop = &rnd_seltrue_filtops;
  750                 else
  751                         kn->kn_fop = &rndread_filtops;
  752                 break;
  753 
  754         case EVFILT_WRITE:
  755                 klist = &rnd_selq.sel_klist;
  756                 kn->kn_fop = &rnd_seltrue_filtops;
  757                 break;
  758 
  759         default:
  760                 return (1);
  761         }
  762 
  763         kn->kn_hook = NULL;
  764 
  765         s = splsoftclock();
  766         SLIST_INSERT_HEAD(klist, kn, kn_selnext);
  767         splx(s);
  768 
  769         return (0);
  770 }
  771 
  772 static rnd_sample_t *
  773 rnd_sample_allocate(rndsource_t *source)
  774 {
  775         rnd_sample_t *c;
  776         int s;
  777 
  778         s = splhigh();
  779         c = pool_get(&rnd_mempool, PR_WAITOK);
  780         splx(s);
  781         if (c == NULL)
  782                 return (NULL);
  783 
  784         c->source = source;
  785         c->cursor = 0;
  786         c->entropy = 0;
  787 
  788         return (c);
  789 }
  790 
  791 /*
  792  * Don't wait on allocation.  To be used in an interrupt context.
  793  */
  794 static rnd_sample_t *
  795 rnd_sample_allocate_isr(rndsource_t *source)
  796 {
  797         rnd_sample_t *c;
  798         int s;
  799 
  800         s = splhigh();
  801         c = pool_get(&rnd_mempool, 0);
  802         splx(s);
  803         if (c == NULL)
  804                 return (NULL);
  805 
  806         c->source = source;
  807         c->cursor = 0;
  808         c->entropy = 0;
  809 
  810         return (c);
  811 }
  812 
  813 static void
  814 rnd_sample_free(rnd_sample_t *c)
  815 {
  816         int s;
  817 
  818         memset(c, 0, sizeof(rnd_sample_t));
  819         s = splhigh();
  820         pool_put(&rnd_mempool, c);
  821         splx(s);
  822 }
  823 
  824 /*
  825  * Add a source to our list of sources.
  826  */
  827 void
  828 rnd_attach_source(rndsource_element_t *rs, char *name, u_int32_t type,
  829     u_int32_t flags)
  830 {
  831         u_int32_t ts;
  832 
  833         ts = rnd_counter();
  834 
  835         strlcpy(rs->data.name, name, sizeof(rs->data.name));
  836         rs->data.last_time = ts;
  837         rs->data.last_delta = 0;
  838         rs->data.last_delta2 = 0;
  839         rs->data.total = 0;
  840 
  841         /*
  842          * Force network devices to not collect any entropy by
  843          * default.
  844          */
  845         if (type == RND_TYPE_NET)
  846                 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
  847 
  848         rs->data.type = type;
  849         rs->data.flags = flags;
  850 
  851         rs->data.state = rnd_sample_allocate(&rs->data);
  852 
  853         LIST_INSERT_HEAD(&rnd_sources, rs, list);
  854 
  855 #ifdef RND_VERBOSE
  856         printf("rnd: %s attached as an entropy source (", rs->data.name);
  857         if (!(flags & RND_FLAG_NO_COLLECT)) {
  858                 printf("collecting");
  859                 if (flags & RND_FLAG_NO_ESTIMATE)
  860                         printf(" without estimation");
  861         }
  862         else
  863                 printf("off");
  864         printf(")\n");
  865 #endif
  866 
  867         /* 
  868          * Again, put some more initial junk in the pool.
  869          * XXX Bogus, but harder to guess than zeros.
  870          */
  871         rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1);
  872 }
  873 
  874 /*
  875  * Remove a source from our list of sources.
  876  */
  877 void
  878 rnd_detach_source(rndsource_element_t *rs)
  879 {
  880         rnd_sample_t *sample;
  881         rndsource_t *source;
  882         int s;
  883 
  884         s = splhigh();
  885 
  886         LIST_REMOVE(rs, list);
  887 
  888         source = &rs->data;
  889 
  890         if (source->state) {
  891                 rnd_sample_free(source->state);
  892                 source->state = NULL;
  893         }
  894 
  895         /*
  896          * If there are samples queued up "remove" them from the sample queue
  897          * by setting the source to the no-collect pseudosource.
  898          */
  899         sample = SIMPLEQ_FIRST(&rnd_samples);
  900         while (sample != NULL) {
  901                 if (sample->source == source)
  902                         sample->source = &rnd_source_no_collect;
  903 
  904                 sample = SIMPLEQ_NEXT(sample, next);
  905         }
  906 
  907         splx(s);
  908 #ifdef RND_VERBOSE
  909         printf("rnd: %s detached as an entropy source\n", rs->data.name);
  910 #endif
  911 }
  912 
  913 /*
  914  * Add a value to the entropy pool.  If rs is NULL, the value is dropped
  915  * without being collected; otherwise rs should point to the per-source
  916  * element structure, which carries the collection and estimation flags.
  917  */
  918 void
  919 rnd_add_uint32(rndsource_element_t *rs, u_int32_t val)
  920 {
  921         rndsource_t *rst;
  922         rnd_sample_t *state;
  923         u_int32_t ts;
  924         int s;
  925 
  926         /*
  927          * If we are not collecting any data at all, just return.
  928          */
  929         if (rs == NULL)
  930                 return;
  931 
  932         rst = &rs->data;
  933 
  934         if (rst->flags & RND_FLAG_NO_COLLECT)
  935                 return;
  936 
  937         /*
  938          * Sample the counter as soon as possible to avoid
  939          * entropy overestimation.
  940          */
  941         ts = rnd_counter();
  942 
  943         /*
  944          * If the sample buffer is NULL, try to allocate one here.  If this
  945          * fails, drop this sample.
  946          */
  947         state = rst->state;
  948         if (state == NULL) {
  949                 state = rnd_sample_allocate_isr(rst);
  950                 if (state == NULL)
  951                         return;
  952                 rst->state = state;
  953         }
  954 
  955         /*
  956          * If we are estimating entropy on this source,
  957          * calculate differentials.
  958          */
  959 
  960         if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
  961                 state->entropy += rnd_estimate_entropy(rst, ts);
  962 
  963         state->ts[state->cursor] = ts;
  964         state->values[state->cursor] = val;
  965         state->cursor++;
  966 
  967         /*
  968          * If the state arrays are not full, we're done.
  969          */
  970         if (state->cursor < RND_SAMPLE_COUNT)
  971                 return;
  972 
  973         /*
  974          * State arrays are full.  Queue this chunk on the processing queue.
  975          */
  976         s = splhigh();
  977         SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
  978         rst->state = NULL;
  979 
  980         /*
  981          * If the timeout isn't pending, have it run in the near future.
  982          */
  983         if (rnd_timeout_pending == 0) {
  984                 rnd_timeout_pending = 1;
  985                 callout_reset(&rnd_callout, 1, rnd_timeout, NULL);
  986         }
  987         splx(s);
  988 
  989         /*
  990          * To get here we have to have queued the state up, and therefore
  991          * we need a new state buffer.  If we can, allocate one now;
  992          * if we don't get it, it doesn't matter; we'll try again on
  993          * the next random event.
  994          */
  995         rst->state = rnd_sample_allocate_isr(rst);
  996 }
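
To show how a driver is expected to feed this machinery, here is a hedged sketch of the usual attach/interrupt/detach pattern. struct foo_softc, the foo_* helpers and the idea of sampling a status register are hypothetical; only rnd_attach_source(), rnd_add_uint32() and rnd_detach_source() come from this file.

#include <sys/param.h>
#include <sys/rnd.h>

/* Hypothetical driver soft state; only the entropy-source member matters here. */
struct foo_softc {
	/* ... device registers, queues, etc. ... */
	rndsource_element_t	sc_rnd_source;
};

/*
 * In the driver's attach routine: register as an entropy source.  A type of
 * RND_TYPE_NET would be forced to RND_FLAG_NO_COLLECT by rnd_attach_source();
 * other types collect and estimate unless flags say otherwise.
 */
static void
foo_attach_rnd(struct foo_softc *sc, char *name)
{
	rnd_attach_source(&sc->sc_rnd_source, name, RND_TYPE_UNKNOWN, 0);
}

/*
 * In the driver's interrupt handler: hand a timing-sensitive value (here an
 * imaginary status register) to the sampling machinery; rnd_add_uint32()
 * timestamps it and queues it for later processing at splsoftclock().
 */
static void
foo_intr_rnd(struct foo_softc *sc, u_int32_t status)
{
	rnd_add_uint32(&sc->sc_rnd_source, status);
}

/* On detach, unhook the source so any queued samples are neutralized. */
static void
foo_detach_rnd(struct foo_softc *sc)
{
	rnd_detach_source(&sc->sc_rnd_source);
}
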
  997 
  998 void
  999 rnd_add_data(rndsource_element_t *rs, void *data, u_int32_t len,
 1000     u_int32_t entropy)
 1001 {
 1002         rndsource_t *rst;
 1003 
 1004         /* Mix in the random data directly into the pool. */
 1005         rndpool_add_data(&rnd_pool, data, len, entropy);
 1006 
 1007         if (rs != NULL) {
 1008                 rst = &rs->data;
 1009                 rst->total += entropy;
 1010 
 1011                 if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
 1012                         /* Estimate entropy using timing information */
 1013                         rnd_add_uint32(rs, *(u_int8_t *)data);
 1014         }
 1015 
 1016         /* Wake up any potential readers since we've just added some data. */
 1017         rnd_wakeup_readers();
 1018 }
 1019 
 1020 /*
 1021  * Timeout routine, run to process the events on the sample queue.  Only one
 1022  * of these can possibly be running at a time; it runs at splsoftclock().
 1023  */
 1024 static void
 1025 rnd_timeout(void *arg)
 1026 {
 1027         rnd_sample_t *sample;
 1028         rndsource_t *source;
 1029         u_int32_t entropy;
 1030         int s;
 1031 
 1032         /*
 1033          * Sample queue is protected at splhigh(); go there briefly to dequeue.
 1034          */
 1035         s = splhigh();
 1036         rnd_timeout_pending = 0;
 1037 
 1038         sample = SIMPLEQ_FIRST(&rnd_samples);
 1039         while (sample != NULL) {
 1040                 SIMPLEQ_REMOVE_HEAD(&rnd_samples, next);
 1041                 splx(s);
 1042 
 1043                 source = sample->source;
 1044 
 1045                 /*
 1046                  * We repeat this check here, since it is possible the source
 1047                  * was disabled before we were called, but after the entry
 1048                  * was queued.
 1049                  */
 1050                 if ((source->flags & RND_FLAG_NO_COLLECT) == 0) {
 1051                         rndpool_add_data(&rnd_pool, sample->values,
 1052                             RND_SAMPLE_COUNT * 4, 0);
 1053 
 1054                         entropy = sample->entropy;
 1055                         if (source->flags & RND_FLAG_NO_ESTIMATE)
 1056                                 entropy = 0;
 1057 
 1058                         rndpool_add_data(&rnd_pool, sample->ts,
 1059                             RND_SAMPLE_COUNT * 4,
 1060                             entropy);
 1061 
 1062                         source->total += sample->entropy;
 1063                 }
 1064 
 1065                 rnd_sample_free(sample);
 1066 
 1067                 /* Go back to splhigh() to dequeue the next one. */
 1068                 s = splhigh();
 1069                 sample = SIMPLEQ_FIRST(&rnd_samples);
 1070         }
 1071         splx(s);
 1072 
 1073         /*
 1074          * Wake up any potential readers waiting.
 1075          */
 1076         rnd_wakeup_readers();
 1077 }
 1078 
 1079 u_int32_t
 1080 rnd_extract_data(void *p, u_int32_t len, u_int32_t flags)
 1081 {
 1082         int retval, s;
 1083         u_int32_t c;
 1084 
 1085         s = splsoftclock();
 1086         if (!rnd_have_entropy) {
 1087 #ifdef RND_VERBOSE
 1088                 printf("rnd: WARNING! initial entropy low (%u).\n",
 1089                        rndpool_get_entropy_count(&rnd_pool));
 1090 #endif
 1091                 /* Try once again to put something in the pool */
 1092                 c = rnd_counter();
 1093                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
 1094         }
 1095         retval = rndpool_extract_data(&rnd_pool, p, len, flags);
 1096         splx(s);
 1097 
 1098         return (retval);
 1099 }

This page is part of the FreeBSD/Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.