FreeBSD/Linux Kernel Cross Reference
sys/dev/rnd.c

    1 /*      $NetBSD: rnd.c,v 1.46.2.1 2005/12/15 20:03:00 tron Exp $        */
    2 
    3 /*-
    4  * Copyright (c) 1997 The NetBSD Foundation, Inc.
    5  * All rights reserved.
    6  *
    7  * This code is derived from software contributed to The NetBSD Foundation
    8  * by Michael Graff <explorer@flame.org>.  This code uses ideas and
    9  * algorithms from the Linux driver written by Ted Ts'o.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  * 3. All advertising materials mentioning features or use of this software
   20  *    must display the following acknowledgement:
   21  *        This product includes software developed by the NetBSD
   22  *        Foundation, Inc. and its contributors.
   23  * 4. Neither the name of The NetBSD Foundation nor the names of its
   24  *    contributors may be used to endorse or promote products derived
   25  *    from this software without specific prior written permission.
   26  *
   27  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
   28  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   29  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   30  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
   31  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   32  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   33  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   34  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   35  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   36  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
   37  * POSSIBILITY OF SUCH DAMAGE.
   38  */
   39 
   40 #include <sys/cdefs.h>
   41 __KERNEL_RCSID(0, "$NetBSD: rnd.c,v 1.46.2.1 2005/12/15 20:03:00 tron Exp $");
   42 
   43 #include <sys/param.h>
   44 #include <sys/ioctl.h>
   45 #include <sys/fcntl.h>
   46 #include <sys/select.h>
   47 #include <sys/poll.h>
   48 #include <sys/malloc.h>
   49 #include <sys/proc.h>
   50 #include <sys/kernel.h>
   51 #include <sys/conf.h>
   52 #include <sys/systm.h>
   53 #include <sys/callout.h>
   54 #include <sys/rnd.h>
   55 #include <sys/vnode.h>
   56 #include <sys/pool.h>
   57 
   58 #ifdef __HAVE_CPU_COUNTER
   59 #include <machine/cpu_counter.h>
   60 #endif
   61 
   62 #ifdef RND_DEBUG
   63 #define DPRINTF(l,x)      if (rnd_debug & (l)) printf x
   64 int     rnd_debug = 0;
   65 #else
   66 #define DPRINTF(l,x)
   67 #endif
   68 
   69 #define RND_DEBUG_WRITE         0x0001
   70 #define RND_DEBUG_READ          0x0002
   71 #define RND_DEBUG_IOCTL         0x0004
   72 #define RND_DEBUG_SNOOZE        0x0008
   73 
   74 /*
   75  * Define RND_VERBOSE to list devices as they attach.
   76  */
   77 #if 0
   78 #define RND_VERBOSE
   79 #endif
   80 
   81 /*
   82  * Use the extraction time as a somewhat-random source
   83  */
   84 #ifndef RND_USE_EXTRACT_TIME
   85 #define RND_USE_EXTRACT_TIME 1
   86 #endif
   87 
   88 /*
   89  * The size of a temporary buffer, malloc()ed when needed, and used for
   90  * reading and writing data.
   91  */
   92 #define RND_TEMP_BUFFER_SIZE    128
   93 
   94 /*
   95  * This is a little bit of state information attached to each device that we
   96  * collect entropy from.  This is simply a collection buffer, and when it
   97  * is full it will be "detached" from the source and added to the entropy
   98  * pool after entropy is distilled as much as possible.
   99  */
  100 #define RND_SAMPLE_COUNT        64      /* collect N samples, then compress */
  101 typedef struct _rnd_sample_t {
  102         SIMPLEQ_ENTRY(_rnd_sample_t) next;
  103         rndsource_t     *source;
  104         int             cursor;
  105         int             entropy;
  106         u_int32_t       ts[RND_SAMPLE_COUNT];
  107         u_int32_t       values[RND_SAMPLE_COUNT];
  108 } rnd_sample_t;
  109 
  110 /*
  111  * The event queue.  Fields are altered at an interrupt level.
  112  * All accesses must be protected at splvm().
  113  */
  114 volatile int                    rnd_timeout_pending;
  115 SIMPLEQ_HEAD(, _rnd_sample_t)   rnd_samples;
  116 
  117 /*
  118  * our select/poll queue
  119  */
  120 struct selinfo rnd_selq;
  121 
  122 /*
  123  * Set when there are readers blocking on data from us
  124  */
  125 #define RND_READWAITING 0x00000001
  126 volatile u_int32_t rnd_status;
  127 
  128 /*
  129  * Memory pool; accessed only at splvm().
  130  */
  131 POOL_INIT(rnd_mempool, sizeof(rnd_sample_t), 0, 0, 0, "rndsample", NULL);
  132 
  133 /*
  134  * Our random pool.  This is defined here rather than using the general
  135  * purpose one defined in rndpool.c.
  136  *
  137  * Samples are collected and queued at splvm() into a separate queue
  138  * (rnd_samples, see above), and processed in a timeout routine; therefore,
  139  * all other accesses to the random pool must be at splsoftclock() as well.
  140  */
  141 rndpool_t rnd_pool;
  142 
  143 /*
  144  * This source is used to easily "remove" queue entries when the source
  145  * which actually generated the events is going away.
  146  */
  147 static rndsource_t rnd_source_no_collect = {
  148         { 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 0, 0, 0, 0, 0, 0, 0 },
  149         0, 0, 0, 0,
  150         RND_TYPE_UNKNOWN,
  151         (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE | RND_TYPE_UNKNOWN),
  152         NULL
  153 };
  154 
  155 struct callout rnd_callout = CALLOUT_INITIALIZER;
  156 
  157 void    rndattach(int);
  158 
  159 dev_type_open(rndopen);
  160 dev_type_read(rndread);
  161 dev_type_write(rndwrite);
  162 dev_type_ioctl(rndioctl);
  163 dev_type_poll(rndpoll);
  164 dev_type_kqfilter(rndkqfilter);
  165 
  166 const struct cdevsw rnd_cdevsw = {
  167         rndopen, nullclose, rndread, rndwrite, rndioctl,
  168         nostop, notty, rndpoll, nommap, rndkqfilter,
  169 };
  170 
  171 static inline void      rnd_wakeup_readers(void);
  172 static inline u_int32_t rnd_estimate_entropy(rndsource_t *, u_int32_t);
  173 static inline u_int32_t rnd_counter(void);
  174 static        void      rnd_timeout(void *);
  175 
  176 static int              rnd_ready = 0;
  177 static int              rnd_have_entropy = 0;
  178 
  179 LIST_HEAD(, __rndsource_element)        rnd_sources;
  180 
  181 /*
  182  * Generate a 32-bit counter.  This should be more machine dependent,
  183  * using cycle counters and the like when possible.
  184  */
  185 static inline u_int32_t
  186 rnd_counter(void)
  187 {
  188         struct timeval tv;
  189 
  190 #ifdef __HAVE_CPU_COUNTER
  191         if (cpu_hascounter())
  192                 return (cpu_counter32());
  193 #endif
  194         if (rnd_ready) {
  195                 microtime(&tv);
  196                 return (tv.tv_sec * 1000000 + tv.tv_usec);
  197         }
  198         /* when called from rnd_init, it's too early to call microtime safely */
  199         return (0);
  200 }
  201 
  202 /*
  203  * Check to see if there are readers waiting on us.  If so, kick them.
  204  *
  205  * Must be called at splsoftclock().
  206  */
  207 static inline void
  208 rnd_wakeup_readers(void)
  209 {
  210 
  211         /*
  212          * If we have added new bits, and now have enough to do something,
  213          * wake up sleeping readers.
  214          */
  215         if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {
  216                 if (rnd_status & RND_READWAITING) {
  217                         DPRINTF(RND_DEBUG_SNOOZE,
  218                             ("waking up pending readers.\n"));
  219                         rnd_status &= ~RND_READWAITING;
  220                         wakeup(&rnd_selq);
  221                 }
  222                 selnotify(&rnd_selq, 0);
  223 
  224 #ifdef RND_VERBOSE
  225                 if (!rnd_have_entropy)
  226                         printf("rnd: have initial entropy (%u)\n",
  227                                rndpool_get_entropy_count(&rnd_pool));
  228 #endif
  229                 rnd_have_entropy = 1;
  230         }
  231 }
  232 
  233 /*
  234  * Use the timing of the event to estimate the entropy gathered.
  235  * If all the differentials (first, second, and third) are non-zero, return
  236  * non-zero.  If any of these are zero, return zero.
  237  */
  238 static inline u_int32_t
  239 rnd_estimate_entropy(rndsource_t *rs, u_int32_t t)
  240 {
  241         int32_t delta, delta2, delta3;
  242 
  243         /*
  244          * If the time counter has overflowed, calculate the real difference.
  245          * If it has not, it is simpler.
  246          */
  247         if (t < rs->last_time)
  248                 delta = UINT_MAX - rs->last_time + t;
  249         else
  250                 delta = rs->last_time - t;
  251 
  252         if (delta < 0)
  253                 delta = -delta;
  254 
  255         /*
  256          * Calculate the second and third order differentials
  257          */
  258         delta2 = rs->last_delta - delta;
  259         if (delta2 < 0)
  260                 delta2 = -delta2;
  261 
  262         delta3 = rs->last_delta2 - delta2;
  263         if (delta3 < 0)
  264                 delta3 = -delta3;
  265 
  266         rs->last_time = t;
  267         rs->last_delta = delta;
  268         rs->last_delta2 = delta2;
  269 
  270         /*
  271          * If any delta is 0, we got no entropy.  If all are non-zero, we
  272          * might have something.
  273          */
  274         if (delta == 0 || delta2 == 0 || delta3 == 0)
  275                 return (0);
  276 
  277         return (1);
  278 }
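
The differential test above is easier to see with concrete numbers.  The
following standalone sketch (a user-space illustration only; the struct and
helper names are invented and are not part of the driver) applies the same
first-, second-, and third-order delta logic to two streams of timestamps:
a perfectly periodic source is credited almost nothing, while a source with
timing jitter earns a bit for most samples.

/*
 * Standalone illustration of the differential entropy estimate used by
 * rnd_estimate_entropy().  Hypothetical names; compile and run in user space.
 */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

struct est {
        uint32_t last_time;
        int32_t  last_delta;
        int32_t  last_delta2;
};

static unsigned
estimate(struct est *e, uint32_t t)
{
        int32_t delta, delta2, delta3;

        /* Wrap-around aware first-order difference, as in the driver. */
        if (t < e->last_time)
                delta = UINT_MAX - e->last_time + t;
        else
                delta = e->last_time - t;
        if (delta < 0)
                delta = -delta;

        delta2 = e->last_delta - delta;
        if (delta2 < 0)
                delta2 = -delta2;

        delta3 = e->last_delta2 - delta2;
        if (delta3 < 0)
                delta3 = -delta3;

        e->last_time = t;
        e->last_delta = delta;
        e->last_delta2 = delta2;

        /* Any zero differential means no entropy is credited for this sample. */
        return (delta == 0 || delta2 == 0 || delta3 == 0) ? 0 : 1;
}

int
main(void)
{
        struct est periodic = { 0, 0, 0 }, jittery = { 0, 0, 0 };
        uint32_t p = 0, j = 0;
        unsigned i, bits_p = 0, bits_j = 0;

        for (i = 1; i <= 8; i++) {
                p += 100;                       /* perfectly periodic events */
                j += 100 + (i * 37) % 23;       /* events with timing jitter */
                bits_p += estimate(&periodic, p);
                bits_j += estimate(&jittery, j);
        }
        printf("periodic source credited %u bit(s), jittery source %u bit(s)\n",
            bits_p, bits_j);
        return 0;
}
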
  279 
  280 /*
  281  * "Attach" the random device. This is an (almost) empty stub, since
  282  * pseudo-devices don't get attached until after config, by which time
  283  * the entropy sources have already attached. We just use the timing of
  284  * this event as another potential source of initial entropy.
  285  */
  286 void
  287 rndattach(int num)
  288 {
  289         u_int32_t c;
  290 
  291         /* Trap unwary players who don't call rnd_init() early */
  292         KASSERT(rnd_ready);
  293 
  294         /* mix in another counter */
  295         c = rnd_counter();
  296         rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
  297 }
  298 
  299 /*
  300  * initialize the global random pool for our use.
  301  * rnd_init() must be called very early on in the boot process, so
  302  * the pool is ready for other devices to attach as sources.
  303  */
  304 void
  305 rnd_init(void)
  306 {
  307         u_int32_t c;
  308 
  309         if (rnd_ready)
  310                 return;
  311 
  312         /*
  313          * take a counter early, hoping that there's some variance in
  314          * the following operations
  315          */
  316         c = rnd_counter();
  317 
  318         LIST_INIT(&rnd_sources);
  319         SIMPLEQ_INIT(&rnd_samples);
  320 
  321         rndpool_init(&rnd_pool);
  322 
  323         /* Mix *something*, *anything* into the pool to help it get started.
  324          * However, it's not safe for rnd_counter() to call microtime() yet,
  325          * so on some platforms we might just end up with zeros anyway.
  326          * XXX more things to add would be nice.
  327          */
  328         if (c) {
  329                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
  330                 c = rnd_counter();
  331                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
  332         }
  333 
  334         rnd_ready = 1;
  335 
  336 #ifdef RND_VERBOSE
  337         printf("rnd: initialised (%u)%s", RND_POOLBITS,
  338                c ? " with counter\n" : "\n");
  339 #endif
  340 }
  341 
  342 int
  343 rndopen(dev_t dev, int flags, int ifmt, struct proc *p)
  344 {
  345 
  346         if (rnd_ready == 0)
  347                 return (ENXIO);
  348 
  349         if (minor(dev) == RND_DEV_URANDOM || minor(dev) == RND_DEV_RANDOM)
  350                 return (0);
  351 
  352         return (ENXIO);
  353 }
  354 
  355 int
  356 rndread(dev_t dev, struct uio *uio, int ioflag)
  357 {
  358         u_int8_t *buf;
  359         u_int32_t entcnt, mode, n, nread;
  360         int ret, s;
  361 
  362         DPRINTF(RND_DEBUG_READ,
  363             ("Random:  Read of %d requested, flags 0x%08x\n",
  364             uio->uio_resid, ioflag));
  365 
  366         if (uio->uio_resid == 0)
  367                 return (0);
  368 
  369         switch (minor(dev)) {
  370         case RND_DEV_RANDOM:
  371                 mode = RND_EXTRACT_GOOD;
  372                 break;
  373         case RND_DEV_URANDOM:
  374                 mode = RND_EXTRACT_ANY;
  375                 break;
  376         default:
  377                 /* Can't happen, but this is cheap */
  378                 return (ENXIO);
  379         }
  380 
  381         ret = 0;
  382 
  383         buf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);
  384 
  385         while (uio->uio_resid > 0) {
  386                 n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);
  387 
  388                 /*
  389                  * Make certain there is data available.  If there
  390                  * is, do the I/O even if it is partial.  If not,
  391                  * sleep unless the user has requested non-blocking
  392                  * I/O.
  393                  */
  394                 for (;;) {
  395                         /*
  396                          * If not requesting strong randomness, we
  397                          * can always read.
  398                          */
  399                         if (mode == RND_EXTRACT_ANY)
  400                                 break;
  401 
  402                         /*
  403                          * How much entropy do we have?  If it is enough for
  404                          * one hash, we can read.
  405                          */
  406                         s = splsoftclock();
  407                         entcnt = rndpool_get_entropy_count(&rnd_pool);
  408                         splx(s);
  409                         if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
  410                                 break;
  411 
  412                         /*
  413                          * Data is not available.
  414                          */
  415                         if (ioflag & IO_NDELAY) {
  416                                 ret = EWOULDBLOCK;
  417                                 goto out;
  418                         }
  419 
  420                         rnd_status |= RND_READWAITING;
  421                         ret = tsleep(&rnd_selq, PRIBIO|PCATCH,
  422                             "rndread", 0);
  423 
  424                         if (ret)
  425                                 goto out;
  426                 }
  427 
  428                 nread = rnd_extract_data(buf, n, mode);
  429 
  430                 /*
  431                  * Copy (possibly partial) data to the user.
  432                  * If an error occurs, or this is a partial
  433                  * read, bail out.
  434                  */
  435                 ret = uiomove((caddr_t)buf, nread, uio);
  436                 if (ret != 0 || nread != n)
  437                         goto out;
  438         }
  439 
  440 out:
  441         free(buf, M_TEMP);
  442         return (ret);
  443 }
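
Seen from user space, the loop above gives /dev/random its blocking
semantics: reads sleep (or fail with EWOULDBLOCK when opened non-blocking)
until the pool holds at least RND_ENTROPY_THRESHOLD * 8 bits of estimated
entropy, while /dev/urandom always returns data.  A minimal consumer, using
only standard POSIX calls, might look like this sketch:

/* Sketch of a user-space reader; ordinary POSIX calls, error handling trimmed. */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        unsigned char buf[16];
        ssize_t n;
        int fd;

        /* Non-blocking open of the "strong" device. */
        fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
        if (fd == -1) {
                perror("open /dev/random");
                return 1;
        }

        n = read(fd, buf, sizeof(buf));
        if (n == -1 && errno == EAGAIN)
                /* Pool below threshold: rndread() returned EWOULDBLOCK. */
                printf("not enough entropy collected yet\n");
        else if (n >= 0)
                printf("got %zd random byte(s)\n", n);

        close(fd);
        return 0;
}
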
  444 
  445 int
  446 rndwrite(dev_t dev, struct uio *uio, int ioflag)
  447 {
  448         u_int8_t *buf;
  449         int n, ret, s;
  450 
  451         DPRINTF(RND_DEBUG_WRITE,
  452             ("Random: Write of %d requested\n", uio->uio_resid));
  453 
  454         if (uio->uio_resid == 0)
  455                 return (0);
  456 
  457         ret = 0;
  458 
  459         buf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);
  460 
  461         while (uio->uio_resid > 0) {
  462                 n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);
  463 
  464                 ret = uiomove((caddr_t)buf, n, uio);
  465                 if (ret != 0)
  466                         break;
  467 
  468                 /*
  469                  * Mix in the bytes.
  470                  */
  471                 s = splsoftclock();
  472                 rndpool_add_data(&rnd_pool, buf, n, 0);
  473                 splx(s);
  474 
  475                 DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
  476         }
  477 
  478         free(buf, M_TEMP);
  479         return (ret);
  480 }
  481 
  482 int
  483 rndioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
  484 {
  485         rndsource_element_t *rse;
  486         rndstat_t *rst;
  487         rndstat_name_t *rstnm;
  488         rndctl_t *rctl;
  489         rnddata_t *rnddata;
  490         u_int32_t count, start;
  491         int ret, s;
  492 
  493         ret = 0;
  494 
  495         switch (cmd) {
  496 
  497         /*
  498          * Handled in upper layer really, but we have to return zero
  499          * for it to be accepted by the upper layer.
  500          */
  501         case FIONBIO:
  502         case FIOASYNC:
  503                 break;
  504 
  505         case RNDGETENTCNT:
  506                 s = splsoftclock();
  507                 *(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
  508                 splx(s);
  509                 break;
  510 
  511         case RNDGETPOOLSTAT:
  512                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  513                         return (ret);
  514 
  515                 s = splsoftclock();
  516                 rndpool_get_stats(&rnd_pool, addr, sizeof(rndpoolstat_t));
  517                 splx(s);
  518                 break;
  519 
  520         case RNDGETSRCNUM:
  521                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  522                         return (ret);
  523 
  524                 rst = (rndstat_t *)addr;
  525 
  526                 if (rst->count == 0)
  527                         break;
  528 
  529                 if (rst->count > RND_MAXSTATCOUNT)
  530                         return (EINVAL);
  531 
  532                 /*
  533                  * Find the starting source by running through the
  534                  * list of sources.
  535                  */
  536                 rse = rnd_sources.lh_first;
  537                 start = rst->start;
  538                 while (rse != NULL && start >= 1) {
  539                         rse = rse->list.le_next;
  540                         start--;
  541                 }
  542 
  543                 /*
  544                  * Return up to as many structures as the user asked
  545                  * for.  If we run out of sources, a count of zero
  546                  * will be returned, without an error.
  547                  */
  548                 for (count = 0; count < rst->count && rse != NULL; count++) {
  549                         memcpy(&rst->source[count], &rse->data,
  550                             sizeof(rndsource_t));
  551                         /* Zero out information which may leak */
  552                         rst->source[count].last_time = 0;
  553                         rst->source[count].last_delta = 0;
  554                         rst->source[count].last_delta2 = 0;
  555                         rst->source[count].state = 0;
  556                         rse = rse->list.le_next;
  557                 }
  558 
  559                 rst->count = count;
  560 
  561                 break;
  562 
  563         case RNDGETSRCNAME:
  564                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  565                         return (ret);
  566 
  567                 /*
  568                  * Scan through the list, trying to find the name.
  569                  */
  570                 rstnm = (rndstat_name_t *)addr;
  571                 rse = rnd_sources.lh_first;
  572                 while (rse != NULL) {
  573                         if (strncmp(rse->data.name, rstnm->name, 16) == 0) {
  574                                 memcpy(&rstnm->source, &rse->data,
  575                                     sizeof(rndsource_t));
  576 
  577                                 return (0);
  578                         }
  579                         rse = rse->list.le_next;
  580                 }
  581 
  582                 ret = ENOENT;           /* name not found */
  583 
  584                 break;
  585 
  586         case RNDCTL:
  587                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  588                         return (ret);
  589 
  590                 /*
  591                  * Set flags to enable/disable entropy counting and/or
  592                  * collection.
  593                  */
  594                 rctl = (rndctl_t *)addr;
  595                 rse = rnd_sources.lh_first;
  596 
  597                 /*
  598                  * Flags set apply to all sources of this type.
  599                  */
  600                 if (rctl->type != 0xff) {
  601                         while (rse != NULL) {
  602                                 if (rse->data.type == rctl->type) {
  603                                         rse->data.flags &= ~rctl->mask;
  604                                         rse->data.flags |=
  605                                             (rctl->flags & rctl->mask);
  606                                 }
  607                                 rse = rse->list.le_next;
  608                         }
  609 
  610                         return (0);
  611                 }
  612 
  613                 /*
  614                  * scan through the list, trying to find the name
  615                  */
  616                 while (rse != NULL) {
  617                         if (strncmp(rse->data.name, rctl->name, 16) == 0) {
  618                                 rse->data.flags &= ~rctl->mask;
  619                                 rse->data.flags |= (rctl->flags & rctl->mask);
  620 
  621                                 return (0);
  622                         }
  623                         rse = rse->list.le_next;
  624                 }
  625 
  626                 ret = ENOENT;           /* name not found */
  627 
  628                 break;
  629 
  630         case RNDADDDATA:
  631                 if ((ret = suser(p->p_ucred, &p->p_acflag)) != 0)
  632                         return (ret);
  633 
  634                 rnddata = (rnddata_t *)addr;
  635 
  636                 s = splsoftclock();
  637                 rndpool_add_data(&rnd_pool, rnddata->data, rnddata->len,
  638                     rnddata->entropy);
  639 
  640                 rnd_wakeup_readers();
  641                 splx(s);
  642 
  643                 break;
  644 
  645         default:
  646                 return (EINVAL);
  647         }
  648 
  649         return (ret);
  650 }
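
This ioctl interface is what a control utility such as rndctl(8) builds on.
The sketch below is a hedged user-space example: it assumes the rndstat_t
layout implied by the code above (start, count, and an inline source[]
array) and the RNDGETENTCNT / RNDGETSRCNUM requests from <sys/rnd.h>; note
that every request except RNDGETENTCNT (and FIONBIO/FIOASYNC) requires
superuser privilege.

/*
 * Sketch of an ioctl consumer for the random device.  Assumes the NetBSD
 * <sys/rnd.h> definitions used by this driver; error handling abbreviated.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/rnd.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        rndstat_t rstat;
        u_int32_t count, i;
        int fd;

        fd = open("/dev/urandom", O_RDONLY);
        if (fd == -1)
                return 1;

        /* How many bits of entropy does the pool currently claim to hold? */
        if (ioctl(fd, RNDGETENTCNT, &count) == 0)
                printf("pool entropy: %u bits\n", count);

        /* Fetch information on the first few attached sources (needs root). */
        rstat.start = 0;
        rstat.count = 4;                /* must not exceed RND_MAXSTATCOUNT */
        if (ioctl(fd, RNDGETSRCNUM, &rstat) == 0)
                for (i = 0; i < rstat.count; i++)
                        printf("source %u: %.16s (type %u)\n", i,
                            rstat.source[i].name, rstat.source[i].type);

        close(fd);
        return 0;
}
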
  651 
  652 int
  653 rndpoll(dev_t dev, int events, struct proc *p)
  654 {
  655         u_int32_t entcnt;
  656         int revents, s;
  657 
  658         /*
  659          * We are always writable.
  660          */
  661         revents = events & (POLLOUT | POLLWRNORM);
  662 
  663         /*
  664          * Save some work if not checking for reads.
  665          */
  666         if ((events & (POLLIN | POLLRDNORM)) == 0)
  667                 return (revents);
  668 
  669         /*
  670          * If the minor device is not /dev/random, we are always readable.
  671          */
  672         if (minor(dev) != RND_DEV_RANDOM) {
  673                 revents |= events & (POLLIN | POLLRDNORM);
  674                 return (revents);
  675         }
  676 
  677         /*
  678          * Make certain we have enough entropy to be readable.
  679          */
  680         s = splsoftclock();
  681         entcnt = rndpool_get_entropy_count(&rnd_pool);
  682         splx(s);
  683 
  684         if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
  685                 revents |= events & (POLLIN | POLLRDNORM);
  686         else
  687                 selrecord(p, &rnd_selq);
  688 
  689         return (revents);
  690 }
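
The same entropy threshold drives poll(2): /dev/random reports POLLIN only
once the pool holds RND_ENTROPY_THRESHOLD * 8 bits; otherwise the caller is
recorded on rnd_selq and woken later by rnd_wakeup_readers().  A small
sketch of a poller (plain POSIX; the five-second timeout is an arbitrary
choice for the example):

/* Sketch: wait up to five seconds for /dev/random to become readable. */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
        struct pollfd pfd;
        int ret;

        pfd.fd = open("/dev/random", O_RDONLY);
        if (pfd.fd == -1)
                return 1;
        pfd.events = POLLIN | POLLRDNORM;

        ret = poll(&pfd, 1, 5000);      /* timeout in milliseconds */
        if (ret > 0 && (pfd.revents & (POLLIN | POLLRDNORM)))
                printf("enough entropy: reads will not block\n");
        else if (ret == 0)
                printf("timed out waiting for entropy\n");

        close(pfd.fd);
        return 0;
}
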
  691 
  692 static void
  693 filt_rnddetach(struct knote *kn)
  694 {
  695         int s;
  696 
  697         s = splsoftclock();
  698         SLIST_REMOVE(&rnd_selq.sel_klist, kn, knote, kn_selnext);
  699         splx(s);
  700 }
  701 
  702 static int
  703 filt_rndread(struct knote *kn, long hint)
  704 {
  705         uint32_t entcnt;
  706 
  707         entcnt = rndpool_get_entropy_count(&rnd_pool);
  708         if (entcnt >= RND_ENTROPY_THRESHOLD * 8) {
  709                 kn->kn_data = RND_TEMP_BUFFER_SIZE;
  710                 return (1);
  711         }
  712         return (0);
  713 }
  714 
  715 static const struct filterops rnd_seltrue_filtops =
  716         { 1, NULL, filt_rnddetach, filt_seltrue };
  717 
  718 static const struct filterops rndread_filtops =
  719         { 1, NULL, filt_rnddetach, filt_rndread };
  720 
  721 int
  722 rndkqfilter(dev_t dev, struct knote *kn)
  723 {
  724         struct klist *klist;
  725         int s;
  726 
  727         switch (kn->kn_filter) {
  728         case EVFILT_READ:
  729                 klist = &rnd_selq.sel_klist;
  730                 if (minor(dev) == RND_DEV_URANDOM)
  731                         kn->kn_fop = &rnd_seltrue_filtops;
  732                 else
  733                         kn->kn_fop = &rndread_filtops;
  734                 break;
  735 
  736         case EVFILT_WRITE:
  737                 klist = &rnd_selq.sel_klist;
  738                 kn->kn_fop = &rnd_seltrue_filtops;
  739                 break;
  740 
  741         default:
  742                 return (1);
  743         }
  744 
  745         kn->kn_hook = NULL;
  746 
  747         s = splsoftclock();
  748         SLIST_INSERT_HEAD(klist, kn, kn_selnext);
  749         splx(s);
  750 
  751         return (0);
  752 }
  753 
  754 static rnd_sample_t *
  755 rnd_sample_allocate(rndsource_t *source)
  756 {
  757         rnd_sample_t *c;
  758         int s;
  759 
  760         s = splvm();
  761         c = pool_get(&rnd_mempool, PR_WAITOK);
  762         splx(s);
  763         if (c == NULL)
  764                 return (NULL);
  765 
  766         c->source = source;
  767         c->cursor = 0;
  768         c->entropy = 0;
  769 
  770         return (c);
  771 }
  772 
  773 /*
  774  * Don't wait on allocation.  To be used in an interrupt context.
  775  */
  776 static rnd_sample_t *
  777 rnd_sample_allocate_isr(rndsource_t *source)
  778 {
  779         rnd_sample_t *c;
  780         int s;
  781 
  782         s = splvm();
  783         c = pool_get(&rnd_mempool, 0);
  784         splx(s);
  785         if (c == NULL)
  786                 return (NULL);
  787 
  788         c->source = source;
  789         c->cursor = 0;
  790         c->entropy = 0;
  791 
  792         return (c);
  793 }
  794 
  795 static void
  796 rnd_sample_free(rnd_sample_t *c)
  797 {
  798         int s;
  799 
  800         memset(c, 0, sizeof(rnd_sample_t));
  801         s = splvm();
  802         pool_put(&rnd_mempool, c);
  803         splx(s);
  804 }
  805 
  806 /*
  807  * Add a source to our list of sources.
  808  */
  809 void
  810 rnd_attach_source(rndsource_element_t *rs, char *name, u_int32_t type,
  811     u_int32_t flags)
  812 {
  813         u_int32_t ts;
  814 
  815         ts = rnd_counter();
  816 
  817         strlcpy(rs->data.name, name, sizeof(rs->data.name));
  818         rs->data.last_time = ts;
  819         rs->data.last_delta = 0;
  820         rs->data.last_delta2 = 0;
  821         rs->data.total = 0;
  822 
  823         /*
  824          * Force network devices to not collect any entropy by
  825          * default.
  826          */
  827         if (type == RND_TYPE_NET)
  828                 flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);
  829 
  830         rs->data.type = type;
  831         rs->data.flags = flags;
  832 
  833         rs->data.state = rnd_sample_allocate(&rs->data);
  834 
  835         LIST_INSERT_HEAD(&rnd_sources, rs, list);
  836 
  837 #ifdef RND_VERBOSE
  838         printf("rnd: %s attached as an entropy source (", rs->data.name);
  839         if (!(flags & RND_FLAG_NO_COLLECT)) {
  840                 printf("collecting");
  841                 if (flags & RND_FLAG_NO_ESTIMATE)
  842                         printf(" without estimation");
  843         }
  844         else
  845                 printf("off");
  846         printf(")\n");
  847 #endif
  848 
  849         /*
  850          * Again, put some more initial junk in the pool.
  851          * XXX Bogus, but harder to guess than zeros.
  852          */
  853         rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1);
  854 }
  855 
  856 /*
  857  * Remove a source from our list of sources.
  858  */
  859 void
  860 rnd_detach_source(rndsource_element_t *rs)
  861 {
  862         rnd_sample_t *sample;
  863         rndsource_t *source;
  864         int s;
  865 
  866         s = splvm();
  867 
  868         LIST_REMOVE(rs, list);
  869 
  870         source = &rs->data;
  871 
  872         if (source->state) {
  873                 rnd_sample_free(source->state);
  874                 source->state = NULL;
  875         }
  876 
  877         /*
  878          * If there are samples queued up "remove" them from the sample queue
  879          * by setting the source to the no-collect pseudosource.
  880          */
  881         sample = SIMPLEQ_FIRST(&rnd_samples);
  882         while (sample != NULL) {
  883                 if (sample->source == source)
  884                         sample->source = &rnd_source_no_collect;
  885 
  886                 sample = SIMPLEQ_NEXT(sample, next);
  887         }
  888 
  889         splx(s);
  890 #ifdef RND_VERBOSE
  891         printf("rnd: %s detached as an entropy source\n", rs->data.name);
  892 #endif
  893 }
  894 
  895 /*
  896  * Add a value to the entropy pool.  If rs is NULL the value is discarded
  897  * without being collected; otherwise rs should point to the source
  898  * element for the device that generated the value.
  899  */
  900 void
  901 rnd_add_uint32(rndsource_element_t *rs, u_int32_t val)
  902 {
  903         rndsource_t *rst;
  904         rnd_sample_t *state;
  905         u_int32_t ts;
  906         int s;
  907 
  908         /*
  909          * If we are not collecting any data at all, just return.
  910          */
  911         if (rs == NULL)
  912                 return;
  913 
  914         rst = &rs->data;
  915 
  916         if (rst->flags & RND_FLAG_NO_COLLECT)
  917                 return;
  918 
  919         /*
  920          * Sample the counter as soon as possible to avoid
  921          * entropy overestimation.
  922          */
  923         ts = rnd_counter();
  924 
  925         /*
  926          * If the sample buffer is NULL, try to allocate one here.  If this
  927          * fails, drop this sample.
  928          */
  929         state = rst->state;
  930         if (state == NULL) {
  931                 state = rnd_sample_allocate_isr(rst);
  932                 if (state == NULL)
  933                         return;
  934                 rst->state = state;
  935         }
  936 
  937         /*
  938          * If we are estimating entropy on this source,
  939          * calculate differentials.
  940          */
  941 
  942         if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
  943                 state->entropy += rnd_estimate_entropy(rst, ts);
  944 
  945         state->ts[state->cursor] = ts;
  946         state->values[state->cursor] = val;
  947         state->cursor++;
  948 
  949         /*
  950          * If the state arrays are not full, we're done.
  951          */
  952         if (state->cursor < RND_SAMPLE_COUNT)
  953                 return;
  954 
  955         /*
  956          * State arrays are full.  Queue this chunk on the processing queue.
  957          */
  958         s = splvm();
  959         SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
  960         rst->state = NULL;
  961 
  962         /*
  963          * If the timeout isn't pending, have it run in the near future.
  964          */
  965         if (rnd_timeout_pending == 0) {
  966                 rnd_timeout_pending = 1;
  967                 callout_reset(&rnd_callout, 1, rnd_timeout, NULL);
  968         }
  969         splx(s);
  970 
  971         /*
  972          * To get here we have to have queued the state up, and therefore
  973          * we need a new state buffer.  If we can, allocate one now;
  974          * if we don't get it, it doesn't matter; we'll try again on
  975          * the next random event.
  976          */
  977         rst->state = rnd_sample_allocate_isr(rst);
  978 }
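
rnd_attach_source() and rnd_add_uint32() form the producer side of this
file: a driver registers itself once at attach time and then feeds one
32-bit value per event, typically from its interrupt handler.  The sketch
below shows the expected call pattern for a hypothetical disk driver; the
softc layout and xd_* names are invented, and RND_TYPE_DISK is assumed to
come from <sys/rnd.h>.

/*
 * Producer-side sketch.  Only the rnd_* calls and RND_* constants are taken
 * from this driver and <sys/rnd.h>; everything else is illustrative.
 */
#include <sys/param.h>
#include <sys/rnd.h>

struct xd_softc {
        /* ... other per-device state ... */
        rndsource_element_t sc_rnd_source;
};

static void
xd_attach(struct xd_softc *sc, char *name)
{
        /* Register as a disk-type source; timing entropy will be estimated. */
        rnd_attach_source(&sc->sc_rnd_source, name, RND_TYPE_DISK, 0);
}

static void
xd_intr(struct xd_softc *sc, u_int32_t blkno)
{
        /*
         * Feed the block number of the completed transfer; rnd_add_uint32()
         * timestamps the event itself and credits at most one bit of
         * entropy per sample.
         */
        rnd_add_uint32(&sc->sc_rnd_source, blkno);
}

static void
xd_detach(struct xd_softc *sc)
{
        /* Unhook the source; rnd.c neutralises any samples still queued. */
        rnd_detach_source(&sc->sc_rnd_source);
}
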
  979 
  980 void
  981 rnd_add_data(rndsource_element_t *rs, void *data, u_int32_t len,
  982     u_int32_t entropy)
  983 {
  984         rndsource_t *rst;
  985 
  986         /* Mix in the random data directly into the pool. */
  987         rndpool_add_data(&rnd_pool, data, len, entropy);
  988 
  989         if (rs != NULL) {
  990                 rst = &rs->data;
  991                 rst->total += entropy;
  992 
  993                 if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
  994                         /* Estimate entropy using timing information */
  995                         rnd_add_uint32(rs, *(u_int8_t *)data);
  996         }
  997 
  998         /* Wake up any potential readers since we've just added some data. */
  999         rnd_wakeup_readers();
 1000 }
 1001 
 1002 /*
 1003  * Timeout, run to process the samples on the queue.  Only one of these
 1004  * can possibly be running at a time; it runs at splsoftclock().
 1005  */
 1006 static void
 1007 rnd_timeout(void *arg)
 1008 {
 1009         rnd_sample_t *sample;
 1010         rndsource_t *source;
 1011         u_int32_t entropy;
 1012         int s;
 1013 
 1014         /*
 1015          * Sample queue is protected at splvm(); go there briefly to dequeue.
 1016          */
 1017         s = splvm();
 1018         rnd_timeout_pending = 0;
 1019 
 1020         sample = SIMPLEQ_FIRST(&rnd_samples);
 1021         while (sample != NULL) {
 1022                 SIMPLEQ_REMOVE_HEAD(&rnd_samples, next);
 1023                 splx(s);
 1024 
 1025                 source = sample->source;
 1026 
 1027                 /*
 1028                  * We repeat this check here, since it is possible the source
 1029                  * was disabled before we were called, but after the entry
 1030                  * was queued.
 1031                  */
 1032                 if ((source->flags & RND_FLAG_NO_COLLECT) == 0) {
 1033                         rndpool_add_data(&rnd_pool, sample->values,
 1034                             RND_SAMPLE_COUNT * 4, 0);
 1035 
 1036                         entropy = sample->entropy;
 1037                         if (source->flags & RND_FLAG_NO_ESTIMATE)
 1038                                 entropy = 0;
 1039 
 1040                         rndpool_add_data(&rnd_pool, sample->ts,
 1041                             RND_SAMPLE_COUNT * 4,
 1042                             entropy);
 1043 
 1044                         source->total += sample->entropy;
 1045                 }
 1046 
 1047                 rnd_sample_free(sample);
 1048 
 1049                 /* Go back to splvm() to dequeue the next one. */
 1050                 s = splvm();
 1051                 sample = SIMPLEQ_FIRST(&rnd_samples);
 1052         }
 1053         splx(s);
 1054 
 1055         /*
 1056          * Wake up any potential readers waiting.
 1057          */
 1058         rnd_wakeup_readers();
 1059 }
 1060 
 1061 u_int32_t
 1062 rnd_extract_data(void *p, u_int32_t len, u_int32_t flags)
 1063 {
 1064         int retval, s;
 1065         u_int32_t c;
 1066 
 1067         s = splsoftclock();
 1068         if (!rnd_have_entropy) {
 1069 #ifdef RND_VERBOSE
 1070                 printf("rnd: WARNING! initial entropy low (%u).\n",
 1071                        rndpool_get_entropy_count(&rnd_pool));
 1072 #endif
 1073                 /* Try once again to put something in the pool */
 1074                 c = rnd_counter();
 1075                 rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
 1076         }
 1077         retval = rndpool_extract_data(&rnd_pool, p, len, flags);
 1078         splx(s);
 1079 
 1080         return (retval);
 1081 }
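
rnd_extract_data() is also the in-kernel consumer interface; rndread()
above is essentially a wrapper around it.  Other kernel code can pull bytes
directly, for example to seed a PRNG.  A hedged sketch (the calling
function is hypothetical; only rnd_extract_data() and the RND_EXTRACT_*
flags come from this driver):

/*
 * In-kernel consumer sketch; the caller and the use of the bytes are
 * hypothetical.  rnd_extract_data() never sleeps: blocking, if desired,
 * is implemented by callers such as rndread() above.
 */
#include <sys/param.h>
#include <sys/rnd.h>

static void
example_seed_prng(void)
{
        u_int32_t seed[4];
        u_int32_t got;

        /*
         * RND_EXTRACT_ANY does not wait for entropy (the /dev/urandom
         * behaviour); RND_EXTRACT_GOOD may return fewer bytes than
         * requested when the pool cannot back them with estimated
         * entropy (the /dev/random behaviour).
         */
        got = rnd_extract_data(seed, sizeof(seed), RND_EXTRACT_ANY);
        if (got < sizeof(seed)) {
                /* Partial result; mix in whatever was returned. */
        }
        /* ... use seed[] to initialise the PRNG ... */
}
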
