FreeBSD/Linux Kernel Cross Reference
sys/dev/rnd.c
/*	$NetBSD: rnd.c,v 1.57 2006/11/16 01:32:45 christos Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Michael Graff <explorer@flame.org>.  This code uses ideas and
 * algorithms from the Linux driver written by Ted Ts'o.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: rnd.c,v 1.57 2006/11/16 01:32:45 christos Exp $");

#include <sys/param.h>
#include <sys/ioctl.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/poll.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/conf.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/rnd.h>
#include <sys/vnode.h>
#include <sys/pool.h>
#include <sys/kauth.h>

#ifdef __HAVE_CPU_COUNTER
#include <machine/cpu_counter.h>
#endif

#ifdef RND_DEBUG
#define	DPRINTF(l,x)	if (rnd_debug & (l)) printf x
int	rnd_debug = 0;
#else
#define	DPRINTF(l,x)
#endif

#define	RND_DEBUG_WRITE		0x0001
#define	RND_DEBUG_READ		0x0002
#define	RND_DEBUG_IOCTL		0x0004
#define	RND_DEBUG_SNOOZE	0x0008

/*
 * Define RND_VERBOSE to log entropy sources as they attach and detach.
 */
#if 0
#define	RND_VERBOSE
#endif

/*
 * Use the extraction time as a somewhat-random source
 */
#ifndef RND_USE_EXTRACT_TIME
#define	RND_USE_EXTRACT_TIME	1
#endif

/*
 * The size of a temporary buffer, malloc()ed when needed, and used for
 * reading and writing data.
 */
#define	RND_TEMP_BUFFER_SIZE	128

/*
 * This is a little bit of state information attached to each device that we
 * collect entropy from.  This is simply a collection buffer, and when it
 * is full it will be "detached" from the source and added to the entropy
 * pool after entropy is distilled as much as possible.
 */
#define	RND_SAMPLE_COUNT	64	/* collect N samples, then compress */
typedef struct _rnd_sample_t {
	SIMPLEQ_ENTRY(_rnd_sample_t)	next;
	rndsource_t	*source;
	int		cursor;
	int		entropy;
	u_int32_t	ts[RND_SAMPLE_COUNT];
	u_int32_t	values[RND_SAMPLE_COUNT];
} rnd_sample_t;
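
/*
 * Illustrative lifecycle note (not driver code): each source owns at most
 * one of these buffers at a time.  rnd_add_uint32() records one timestamp
 * and one value per event; once cursor reaches RND_SAMPLE_COUNT the buffer
 * is queued on rnd_samples and rnd_timeout() later distills it into the
 * pool, while the source allocates a fresh buffer for new events.
 */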

/*
 * The event queue.  Fields are altered at an interrupt level.
 * All accesses must be protected at splvm().
 */
volatile int			rnd_timeout_pending;
SIMPLEQ_HEAD(, _rnd_sample_t)	rnd_samples;

/*
 * our select/poll queue
 */
struct selinfo rnd_selq;

/*
 * Set when there are readers blocking on data from us
 */
#define	RND_READWAITING 0x00000001
volatile u_int32_t rnd_status;

/*
 * Memory pool; accessed only at splvm().
 */
POOL_INIT(rnd_mempool, sizeof(rnd_sample_t), 0, 0, 0, "rndsample", NULL);

/*
 * Our random pool.  This is defined here rather than using the general
 * purpose one defined in rndpool.c.
 *
 * Samples are collected and queued at splvm() into a separate queue
 * (rnd_samples, see above), and processed in a timeout routine; therefore,
 * all other accesses to the random pool must be at splsoftclock() as well.
 */
rndpool_t rnd_pool;

/*
 * This source is used to easily "remove" queue entries when the source
 * which actually generated the events is going away.
 */
static rndsource_t rnd_source_no_collect = {
	{ 'N', 'o', 'C', 'o', 'l', 'l', 'e', 'c', 't', 0, 0, 0, 0, 0, 0, 0 },
	0, 0, 0, 0,
	RND_TYPE_UNKNOWN,
	(RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE | RND_TYPE_UNKNOWN),
	NULL
};

struct callout rnd_callout = CALLOUT_INITIALIZER;

void	rndattach(int);

dev_type_open(rndopen);
dev_type_read(rndread);
dev_type_write(rndwrite);
dev_type_ioctl(rndioctl);
dev_type_poll(rndpoll);
dev_type_kqfilter(rndkqfilter);

const struct cdevsw rnd_cdevsw = {
	rndopen, nullclose, rndread, rndwrite, rndioctl,
	nostop, notty, rndpoll, nommap, rndkqfilter, D_OTHER,
};

static inline void	rnd_wakeup_readers(void);
static inline u_int32_t	rnd_estimate_entropy(rndsource_t *, u_int32_t);
static inline u_int32_t	rnd_counter(void);
static void		rnd_timeout(void *);

static int		rnd_ready = 0;
static int		rnd_have_entropy = 0;

LIST_HEAD(, __rndsource_element)	rnd_sources;

/*
 * Generate a 32-bit counter.  This should be more machine dependent,
 * using cycle counters and the like when possible.
 */
static inline u_int32_t
rnd_counter(void)
{
	struct timeval tv;

#ifdef __HAVE_CPU_COUNTER
	if (cpu_hascounter())
		return (cpu_counter32());
#endif
	if (rnd_ready) {
		microtime(&tv);
		return (tv.tv_sec * 1000000 + tv.tv_usec);
	}
	/* when called from rnd_init, it's too early to call microtime safely */
	return (0);
}

/*
 * Check to see if there are readers waiting on us.  If so, kick them.
 *
 * Must be called at splsoftclock().
 */
static inline void
rnd_wakeup_readers(void)
{

	/*
	 * If we have added new bits, and now have enough to do something,
	 * wake up sleeping readers.
	 */
	if (rndpool_get_entropy_count(&rnd_pool) > RND_ENTROPY_THRESHOLD * 8) {
		if (rnd_status & RND_READWAITING) {
			DPRINTF(RND_DEBUG_SNOOZE,
			    ("waking up pending readers.\n"));
			rnd_status &= ~RND_READWAITING;
			wakeup(&rnd_selq);
		}
		selnotify(&rnd_selq, 0);

#ifdef RND_VERBOSE
		if (!rnd_have_entropy)
			printf("rnd: have initial entropy (%u)\n",
			    rndpool_get_entropy_count(&rnd_pool));
#endif
		rnd_have_entropy = 1;
	}
}
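
/*
 * Note on the threshold above (an assumption, based on sys/rnd.h): the
 * pool's entropy counter is kept in bits, while RND_ENTROPY_THRESHOLD
 * appears to be a byte count, hence the "* 8" in the comparison.
 */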

/*
 * Use the timing of the event to estimate the entropy gathered.
 * If all the differentials (first, second, and third) are non-zero, return
 * non-zero.  If any of these are zero, return zero.
 */
static inline u_int32_t
rnd_estimate_entropy(rndsource_t *rs, u_int32_t t)
{
	int32_t delta, delta2, delta3;

	/*
	 * If the time counter has overflowed, calculate the real difference.
	 * If it has not, it is simpler.
	 */
	if (t < rs->last_time)
		delta = UINT_MAX - rs->last_time + t;
	else
		delta = rs->last_time - t;

	if (delta < 0)
		delta = -delta;

	/*
	 * Calculate the second and third order differentials
	 */
	delta2 = rs->last_delta - delta;
	if (delta2 < 0)
		delta2 = -delta2;

	delta3 = rs->last_delta2 - delta2;
	if (delta3 < 0)
		delta3 = -delta3;

	rs->last_time = t;
	rs->last_delta = delta;
	rs->last_delta2 = delta2;

	/*
	 * If any delta is 0, we got no entropy.  If all are non-zero, we
	 * might have something.
	 */
	if (delta == 0 || delta2 == 0 || delta3 == 0)
		return (0);

	return (1);
}
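
/*
 * Worked example (illustrative): a perfectly periodic source whose
 * timestamps arrive exactly 105 counts apart yields delta == 105 on
 * every call, so delta2 == |105 - 105| == 0 and the sample is credited
 * nothing.  Only timing that is irregular through the third-order
 * differential earns the single bit returned above.
 */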

/*
 * "Attach" the random device.  This is an (almost) empty stub, since
 * pseudo-devices don't get attached until after config, after the
 * entropy sources will attach.  We just use the timing of this event
 * as another potential source of initial entropy.
 */
void
rndattach(int num)
{
	u_int32_t c;

	/* Trap unwary players who don't call rnd_init() early */
	KASSERT(rnd_ready);

	/* mix in another counter */
	c = rnd_counter();
	rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
}

/*
 * initialize the global random pool for our use.
 * rnd_init() must be called very early on in the boot process, so
 * the pool is ready for other devices to attach as sources.
 */
void
rnd_init(void)
{
	u_int32_t c;

	if (rnd_ready)
		return;

	/*
	 * take a counter early, hoping that there's some variance in
	 * the following operations
	 */
	c = rnd_counter();

	LIST_INIT(&rnd_sources);
	SIMPLEQ_INIT(&rnd_samples);

	rndpool_init(&rnd_pool);

	/* Mix *something*, *anything* into the pool to help it get started.
	 * However, it's not safe for rnd_counter() to call microtime() yet,
	 * so on some platforms we might just end up with zeros anyway.
	 * XXX more things to add would be nice.
	 */
	if (c) {
		rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
		c = rnd_counter();
		rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
	}

	rnd_ready = 1;

#ifdef RND_VERBOSE
	printf("rnd: initialised (%u)%s", RND_POOLBITS,
	    c ? " with counter\n" : "\n");
#endif
}

int
rndopen(dev_t dev, int flags, int ifmt,
    struct lwp *l)
{

	if (rnd_ready == 0)
		return (ENXIO);

	if (minor(dev) == RND_DEV_URANDOM || minor(dev) == RND_DEV_RANDOM)
		return (0);

	return (ENXIO);
}

int
rndread(dev_t dev, struct uio *uio, int ioflag)
{
	u_int8_t *bf;
	u_int32_t entcnt, mode, n, nread;
	int ret, s;

	DPRINTF(RND_DEBUG_READ,
	    ("Random: Read of %d requested, flags 0x%08x\n",
	    uio->uio_resid, ioflag));

	if (uio->uio_resid == 0)
		return (0);

	switch (minor(dev)) {
	case RND_DEV_RANDOM:
		mode = RND_EXTRACT_GOOD;
		break;
	case RND_DEV_URANDOM:
		mode = RND_EXTRACT_ANY;
		break;
	default:
		/* Can't happen, but this is cheap */
		return (ENXIO);
	}

	ret = 0;

	bf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);

	while (uio->uio_resid > 0) {
		n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

		/*
		 * Make certain there is data available.  If there
		 * is, do the I/O even if it is partial.  If not,
		 * sleep unless the user has requested non-blocking
		 * I/O.
		 */
		for (;;) {
			/*
			 * If not requesting strong randomness, we
			 * can always read.
			 */
			if (mode == RND_EXTRACT_ANY)
				break;

			/*
			 * How much entropy do we have?  If it is enough for
			 * one hash, we can read.
			 */
			s = splsoftclock();
			entcnt = rndpool_get_entropy_count(&rnd_pool);
			splx(s);
			if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
				break;

			/*
			 * Data is not available.
			 */
			if (ioflag & IO_NDELAY) {
				ret = EWOULDBLOCK;
				goto out;
			}

			rnd_status |= RND_READWAITING;
			ret = tsleep(&rnd_selq, PRIBIO|PCATCH,
			    "rndread", 0);

			if (ret)
				goto out;
		}

		nread = rnd_extract_data(bf, n, mode);

		/*
		 * Copy (possibly partial) data to the user.
		 * If an error occurs, or this is a partial
		 * read, bail out.
		 */
		ret = uiomove((caddr_t)bf, nread, uio);
		if (ret != 0 || nread != n)
			goto out;
	}

out:
	free(bf, M_TEMP);
	return (ret);
}
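
/*
 * Userland usage sketch (illustrative, not part of the driver): reads
 * from /dev/urandom always succeed, while a non-blocking read from
 * /dev/random fails until enough entropy has been gathered, e.g.
 *
 *	int fd = open("/dev/random", O_RDONLY | O_NONBLOCK);
 *	char buf[16];
 *	if (read(fd, buf, sizeof(buf)) == -1)
 *		err(1, "read");    (errno == EWOULDBLOCK when entropy is short)
 */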

int
rndwrite(dev_t dev, struct uio *uio, int ioflag)
{
	u_int8_t *bf;
	int n, ret, s;

	DPRINTF(RND_DEBUG_WRITE,
	    ("Random: Write of %d requested\n", uio->uio_resid));

	if (uio->uio_resid == 0)
		return (0);

	ret = 0;

	bf = malloc(RND_TEMP_BUFFER_SIZE, M_TEMP, M_WAITOK);

	while (uio->uio_resid > 0) {
		n = min(RND_TEMP_BUFFER_SIZE, uio->uio_resid);

		ret = uiomove((caddr_t)bf, n, uio);
		if (ret != 0)
			break;

		/*
		 * Mix in the bytes.
		 */
		s = splsoftclock();
		rndpool_add_data(&rnd_pool, bf, n, 0);
		splx(s);

		DPRINTF(RND_DEBUG_WRITE, ("Random: Copied in %d bytes\n", n));
	}

	free(bf, M_TEMP);
	return (ret);
}

int
rndioctl(dev_t dev, u_long cmd, caddr_t addr, int flag,
    struct lwp *l)
{
	rndsource_element_t *rse;
	rndstat_t *rst;
	rndstat_name_t *rstnm;
	rndctl_t *rctl;
	rnddata_t *rnddata;
	u_int32_t count, start;
	int ret, s;

	ret = 0;

	switch (cmd) {

	/*
	 * Handled in upper layer really, but we have to return zero
	 * for it to be accepted by the upper layer.
	 */
	case FIONBIO:
	case FIOASYNC:
		break;

	case RNDGETENTCNT:
		s = splsoftclock();
		*(u_int32_t *)addr = rndpool_get_entropy_count(&rnd_pool);
		splx(s);
		break;

	case RNDGETPOOLSTAT:
		if ((ret = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (ret);

		s = splsoftclock();
		rndpool_get_stats(&rnd_pool, addr, sizeof(rndpoolstat_t));
		splx(s);
		break;

	case RNDGETSRCNUM:
		if ((ret = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (ret);

		rst = (rndstat_t *)addr;

		if (rst->count == 0)
			break;

		if (rst->count > RND_MAXSTATCOUNT)
			return (EINVAL);

		/*
		 * Find the starting source by running through the
		 * list of sources.
		 */
		rse = rnd_sources.lh_first;
		start = rst->start;
		while (rse != NULL && start >= 1) {
			rse = rse->list.le_next;
			start--;
		}

		/*
		 * Return up to as many structures as the user asked
		 * for.  If we run out of sources, a count of zero
		 * will be returned, without an error.
		 */
		for (count = 0; count < rst->count && rse != NULL; count++) {
			memcpy(&rst->source[count], &rse->data,
			    sizeof(rndsource_t));
			/* Zero out information which may leak */
			rst->source[count].last_time = 0;
			rst->source[count].last_delta = 0;
			rst->source[count].last_delta2 = 0;
			rst->source[count].state = 0;
			rse = rse->list.le_next;
		}

		rst->count = count;

		break;

	case RNDGETSRCNAME:
		if ((ret = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (ret);

		/*
		 * Scan through the list, trying to find the name.
		 */
		rstnm = (rndstat_name_t *)addr;
		rse = rnd_sources.lh_first;
		while (rse != NULL) {
			if (strncmp(rse->data.name, rstnm->name, 16) == 0) {
				memcpy(&rstnm->source, &rse->data,
				    sizeof(rndsource_t));

				return (0);
			}
			rse = rse->list.le_next;
		}

		ret = ENOENT;	/* name not found */

		break;

	case RNDCTL:
		if ((ret = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (ret);

		/*
		 * Set flags to enable/disable entropy counting and/or
		 * collection.
		 */
		rctl = (rndctl_t *)addr;
		rse = rnd_sources.lh_first;

		/*
		 * Flags set apply to all sources of this type.
		 */
		if (rctl->type != 0xff) {
			while (rse != NULL) {
				if (rse->data.type == rctl->type) {
					rse->data.flags &= ~rctl->mask;
					rse->data.flags |=
					    (rctl->flags & rctl->mask);
				}
				rse = rse->list.le_next;
			}

			return (0);
		}

		/*
		 * scan through the list, trying to find the name
		 */
		while (rse != NULL) {
			if (strncmp(rse->data.name, rctl->name, 16) == 0) {
				rse->data.flags &= ~rctl->mask;
				rse->data.flags |= (rctl->flags & rctl->mask);

				return (0);
			}
			rse = rse->list.le_next;
		}

		ret = ENOENT;	/* name not found */

		break;

	case RNDADDDATA:
		if ((ret = kauth_authorize_generic(l->l_cred,
		    KAUTH_GENERIC_ISSUSER, &l->l_acflag)) != 0)
			return (ret);

		rnddata = (rnddata_t *)addr;

		s = splsoftclock();
		rndpool_add_data(&rnd_pool, rnddata->data, rnddata->len,
		    rnddata->entropy);

		rnd_wakeup_readers();
		splx(s);

		break;

	default:
		return (EINVAL);
	}

	return (ret);
}
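
/*
 * Userland usage sketch (illustrative): the unprivileged entropy-count
 * request above can be issued on either minor device, e.g.
 *
 *	u_int32_t bits;
 *	int fd = open("/dev/urandom", O_RDONLY);
 *	if (ioctl(fd, RNDGETENTCNT, &bits) == 0)
 *		printf("pool holds %u bits\n", bits);
 *
 * The privileged requests (RNDCTL, RNDADDDATA, and the statistics calls)
 * follow the same pattern; rndctl(8) is built on them.
 */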

int
rndpoll(dev_t dev, int events, struct lwp *l)
{
	u_int32_t entcnt;
	int revents, s;

	/*
	 * We are always writable.
	 */
	revents = events & (POLLOUT | POLLWRNORM);

	/*
	 * Save some work if not checking for reads.
	 */
	if ((events & (POLLIN | POLLRDNORM)) == 0)
		return (revents);

	/*
	 * If the minor device is not /dev/random, we are always readable.
	 */
	if (minor(dev) != RND_DEV_RANDOM) {
		revents |= events & (POLLIN | POLLRDNORM);
		return (revents);
	}

	/*
	 * Make certain we have enough entropy to be readable.
	 */
	s = splsoftclock();
	entcnt = rndpool_get_entropy_count(&rnd_pool);
	splx(s);

	if (entcnt >= RND_ENTROPY_THRESHOLD * 8)
		revents |= events & (POLLIN | POLLRDNORM);
	else
		selrecord(l, &rnd_selq);

	return (revents);
}

static void
filt_rnddetach(struct knote *kn)
{
	int s;

	s = splsoftclock();
	SLIST_REMOVE(&rnd_selq.sel_klist, kn, knote, kn_selnext);
	splx(s);
}

static int
filt_rndread(struct knote *kn, long hint)
{
	uint32_t entcnt;

	entcnt = rndpool_get_entropy_count(&rnd_pool);
	if (entcnt >= RND_ENTROPY_THRESHOLD * 8) {
		kn->kn_data = RND_TEMP_BUFFER_SIZE;
		return (1);
	}
	return (0);
}

static const struct filterops rnd_seltrue_filtops =
	{ 1, NULL, filt_rnddetach, filt_seltrue };

static const struct filterops rndread_filtops =
	{ 1, NULL, filt_rnddetach, filt_rndread };

int
rndkqfilter(dev_t dev, struct knote *kn)
{
	struct klist *klist;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &rnd_selq.sel_klist;
		if (minor(dev) == RND_DEV_URANDOM)
			kn->kn_fop = &rnd_seltrue_filtops;
		else
			kn->kn_fop = &rndread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &rnd_selq.sel_klist;
		kn->kn_fop = &rnd_seltrue_filtops;
		break;

	default:
		return (1);
	}

	kn->kn_hook = NULL;

	s = splsoftclock();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}
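
/*
 * Usage sketch (illustrative): a process can wait for /dev/random to
 * become readable via kqueue, e.g.
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *	int fd = open("/dev/random", O_RDONLY);
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(kq, &ev, 1, &ev, 1, NULL);
 *
 * which returns once filt_rndread() sees the entropy threshold met.
 */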

static rnd_sample_t *
rnd_sample_allocate(rndsource_t *source)
{
	rnd_sample_t *c;
	int s;

	s = splvm();
	c = pool_get(&rnd_mempool, PR_WAITOK);
	splx(s);
	if (c == NULL)
		return (NULL);

	c->source = source;
	c->cursor = 0;
	c->entropy = 0;

	return (c);
}

/*
 * Don't wait on allocation.  To be used in an interrupt context.
 */
static rnd_sample_t *
rnd_sample_allocate_isr(rndsource_t *source)
{
	rnd_sample_t *c;
	int s;

	s = splvm();
	c = pool_get(&rnd_mempool, 0);
	splx(s);
	if (c == NULL)
		return (NULL);

	c->source = source;
	c->cursor = 0;
	c->entropy = 0;

	return (c);
}

static void
rnd_sample_free(rnd_sample_t *c)
{
	int s;

	memset(c, 0, sizeof(rnd_sample_t));
	s = splvm();
	pool_put(&rnd_mempool, c);
	splx(s);
}

/*
 * Add a source to our list of sources.
 */
void
rnd_attach_source(rndsource_element_t *rs, const char *name, u_int32_t type,
    u_int32_t flags)
{
	u_int32_t ts;

	ts = rnd_counter();

	strlcpy(rs->data.name, name, sizeof(rs->data.name));
	rs->data.last_time = ts;
	rs->data.last_delta = 0;
	rs->data.last_delta2 = 0;
	rs->data.total = 0;

	/*
	 * Force network devices to not collect any entropy by
	 * default.
	 */
	if (type == RND_TYPE_NET)
		flags |= (RND_FLAG_NO_COLLECT | RND_FLAG_NO_ESTIMATE);

	rs->data.type = type;
	rs->data.flags = flags;

	rs->data.state = rnd_sample_allocate(&rs->data);

	LIST_INSERT_HEAD(&rnd_sources, rs, list);

#ifdef RND_VERBOSE
	printf("rnd: %s attached as an entropy source (", rs->data.name);
	if (!(flags & RND_FLAG_NO_COLLECT)) {
		printf("collecting");
		if (flags & RND_FLAG_NO_ESTIMATE)
			printf(" without estimation");
	} else
		printf("off");
	printf(")\n");
#endif

	/*
	 * Again, put some more initial junk in the pool.
	 * XXX Bogus, but harder to guess than zeros.
	 */
	rndpool_add_data(&rnd_pool, &ts, sizeof(u_int32_t), 1);
}
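
/*
 * Typical caller (illustrative sketch; the softc member name is
 * hypothetical): a driver embeds a rndsource_element_t in its softc
 * and attaches it once at autoconfiguration time, e.g.
 *
 *	rnd_attach_source(&sc->sc_rnd_source, sc->sc_dev.dv_xname,
 *	    RND_TYPE_DISK, 0);
 *
 * after which its interrupt handler feeds events via rnd_add_uint32().
 */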

/*
 * Remove a source from our list of sources.
 */
void
rnd_detach_source(rndsource_element_t *rs)
{
	rnd_sample_t *sample;
	rndsource_t *source;
	int s;

	s = splvm();

	LIST_REMOVE(rs, list);

	source = &rs->data;

	if (source->state) {
		rnd_sample_free(source->state);
		source->state = NULL;
	}

	/*
	 * If there are samples queued up "remove" them from the sample queue
	 * by setting the source to the no-collect pseudosource.
	 */
	sample = SIMPLEQ_FIRST(&rnd_samples);
	while (sample != NULL) {
		if (sample->source == source)
			sample->source = &rnd_source_no_collect;

		sample = SIMPLEQ_NEXT(sample, next);
	}

	splx(s);
#ifdef RND_VERBOSE
	printf("rnd: %s detached as an entropy source\n", rs->data.name);
#endif
}

/*
 * Add a value to the entropy pool.  The rs parameter should point to the
 * source-specific source structure.
 */
void
rnd_add_uint32(rndsource_element_t *rs, u_int32_t val)
{
	rndsource_t *rst;
	rnd_sample_t *state;
	u_int32_t ts;
	int s;

	rst = &rs->data;

	if (rst->flags & RND_FLAG_NO_COLLECT)
		return;

	/*
	 * Sample the counter as soon as possible to avoid
	 * entropy overestimation.
	 */
	ts = rnd_counter();

	/*
	 * If the sample buffer is NULL, try to allocate one here.  If this
	 * fails, drop this sample.
	 */
	state = rst->state;
	if (state == NULL) {
		state = rnd_sample_allocate_isr(rst);
		if (state == NULL)
			return;
		rst->state = state;
	}

	/*
	 * If we are estimating entropy on this source,
	 * calculate differentials.
	 */

	if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
		state->entropy += rnd_estimate_entropy(rst, ts);

	state->ts[state->cursor] = ts;
	state->values[state->cursor] = val;
	state->cursor++;

	/*
	 * If the state arrays are not full, we're done.
	 */
	if (state->cursor < RND_SAMPLE_COUNT)
		return;

	/*
	 * State arrays are full.  Queue this chunk on the processing queue.
	 */
	s = splvm();
	SIMPLEQ_INSERT_HEAD(&rnd_samples, state, next);
	rst->state = NULL;

	/*
	 * If the timeout isn't pending, have it run in the near future.
	 */
	if (rnd_timeout_pending == 0) {
		rnd_timeout_pending = 1;
		callout_reset(&rnd_callout, 1, rnd_timeout, NULL);
	}
	splx(s);

	/*
	 * To get here we have to have queued the state up, and therefore
	 * we need a new state buffer.  If we can, allocate one now;
	 * if we don't get it, it doesn't matter; we'll try again on
	 * the next random event.
	 */
	rst->state = rnd_sample_allocate_isr(rst);
}
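
/*
 * Typical caller (illustrative sketch): an interrupt handler passes
 * whatever per-event datum it has on hand, e.g.
 *
 *	rnd_add_uint32(&sc->sc_rnd_source, bp->b_blkno);
 *
 * where the value (a hypothetical block number here) matters far less
 * than the timing, which is what rnd_estimate_entropy() credits.
 */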

void
rnd_add_data(rndsource_element_t *rs, void *data, u_int32_t len,
    u_int32_t entropy)
{
	rndsource_t *rst;

	/* Mix in the random data directly into the pool. */
	rndpool_add_data(&rnd_pool, data, len, entropy);

	if (rs != NULL) {
		rst = &rs->data;
		rst->total += entropy;

		if ((rst->flags & RND_FLAG_NO_ESTIMATE) == 0)
			/* Estimate entropy using timing information */
			rnd_add_uint32(rs, *(u_int8_t *)data);
	}

	/* Wake up any potential readers since we've just added some data. */
	rnd_wakeup_readers();
}

/*
 * Timeout, run to process the events in the ring buffer.  Only one of these
 * can possibly be running at a time, run at splsoftclock().
 */
static void
rnd_timeout(void *arg)
{
	rnd_sample_t *sample;
	rndsource_t *source;
	u_int32_t entropy;
	int s;

	/*
	 * Sample queue is protected at splvm(); go there briefly to dequeue.
	 */
	s = splvm();
	rnd_timeout_pending = 0;

	sample = SIMPLEQ_FIRST(&rnd_samples);
	while (sample != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rnd_samples, next);
		splx(s);

		source = sample->source;

		/*
		 * We repeat this check here, since it is possible the source
		 * was disabled before we were called, but after the entry
		 * was queued.
		 */
		if ((source->flags & RND_FLAG_NO_COLLECT) == 0) {
			rndpool_add_data(&rnd_pool, sample->values,
			    RND_SAMPLE_COUNT * 4, 0);

			entropy = sample->entropy;
			if (source->flags & RND_FLAG_NO_ESTIMATE)
				entropy = 0;

			rndpool_add_data(&rnd_pool, sample->ts,
			    RND_SAMPLE_COUNT * 4,
			    entropy);

			source->total += sample->entropy;
		}

		rnd_sample_free(sample);

		/* Go back to splvm to dequeue the next one.. */
		s = splvm();
		sample = SIMPLEQ_FIRST(&rnd_samples);
	}
	splx(s);

	/*
	 * Wake up any potential readers waiting.
	 */
	rnd_wakeup_readers();
}

u_int32_t
rnd_extract_data(void *p, u_int32_t len, u_int32_t flags)
{
	int retval, s;
	u_int32_t c;

	s = splsoftclock();
	if (!rnd_have_entropy) {
#ifdef RND_VERBOSE
		printf("rnd: WARNING! initial entropy low (%u).\n",
		    rndpool_get_entropy_count(&rnd_pool));
#endif
		/* Try once again to put something in the pool */
		c = rnd_counter();
		rndpool_add_data(&rnd_pool, &c, sizeof(u_int32_t), 1);
	}
	retval = rndpool_extract_data(&rnd_pool, p, len, flags);
	splx(s);

	return (retval);
}