1 /* $OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $ */
2 /*-
3 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
4 *
5 * This code was written by Angelos D. Keromytis in Athens, Greece, in
6 * February 2000. Network Security Technologies Inc. (NSTI) kindly
7 * supported the development of this code.
8 *
9 * Copyright (c) 2000, 2001 Angelos D. Keromytis
10 *
11 * Permission to use, copy, and modify this software with or without fee
12 * is hereby granted, provided that this entire notice is included in
13 * all source code copies of any software which is or includes a copy or
14 * modification of this software.
15 *
16 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
17 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
18 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
19 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
20 * PURPOSE.
21 */
22
23 #include <sys/cdefs.h>
24 __FBSDID("$FreeBSD: releng/6.2/sys/opencrypto/crypto.c 161998 2006-09-04 15:16:14Z pjd $");
25
26 #define CRYPTO_TIMING /* enable timing support */
27
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/eventhandler.h>
31 #include <sys/kernel.h>
32 #include <sys/kthread.h>
33 #include <sys/lock.h>
34 #include <sys/module.h>
35 #include <sys/mutex.h>
36 #include <sys/malloc.h>
37 #include <sys/proc.h>
38 #include <sys/sysctl.h>
39
40 #include <vm/uma.h>
41 #include <opencrypto/cryptodev.h>
42 #include <opencrypto/xform.h> /* XXX for M_XDATA */
43
44 /*
45 * Crypto drivers register themselves by allocating a slot in the
46 * crypto_drivers table with crypto_get_driverid() and then registering
47 * each algorithm they support with crypto_register() and crypto_kregister().
48 */
49 static struct mtx crypto_drivers_mtx; /* lock on driver table */
50 #define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx)
51 #define CRYPTO_DRIVER_UNLOCK() mtx_unlock(&crypto_drivers_mtx)
52 static struct cryptocap *crypto_drivers = NULL;
53 static int crypto_drivers_num = 0;
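
/*
 * Illustrative sketch (not compiled here) of how a driver claims a slot
 * and registers its algorithms; the "mydrv" names are hypothetical, see
 * e.g. hifn(4) or ubsec(4) for real attach routines.
 */
#if 0
static int
mydrv_crypto_attach(struct mydrv_softc *sc)
{
	int32_t cid;

	/* Reserve an entry in crypto_drivers[]; flags 0 == hardware driver. */
	cid = crypto_get_driverid(0);
	if (cid < 0)
		return (ENXIO);
	sc->sc_cid = cid;

	/* Advertise each symmetric algorithm the engine handles. */
	crypto_register(cid, CRYPTO_DES_CBC, 0, 0,
	    mydrv_newsession, mydrv_freesession, mydrv_process, sc);
	crypto_register(cid, CRYPTO_3DES_CBC, 0, 0,
	    mydrv_newsession, mydrv_freesession, mydrv_process, sc);

	/* And any key (asymmetric) operations it accelerates. */
	crypto_kregister(cid, CRK_MOD_EXP, 0, mydrv_kprocess, sc);
	return (0);
}
#endif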
54
55 /*
56 * There are two queues for crypto requests; one for symmetric (e.g.
57 * cipher) operations and one for asymmetric (e.g. MOD) operations.
58 * A single mutex is used to lock access to both queues. We could
59 * have one per-queue but having one simplifies handling of block/unblock
60 * operations.
61 */
62 static int crp_sleep = 0;
63 static TAILQ_HEAD(,cryptop) crp_q; /* request queues */
64 static TAILQ_HEAD(,cryptkop) crp_kq;
65 static struct mtx crypto_q_mtx;
66 #define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
67 #define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
68
69 /*
70 * There are two queues for processing completed crypto requests; one
71 * for the symmetric and one for the asymmetric ops. We only need one
72 * but have two to avoid type futzing (cryptop vs. cryptkop). A single
73 * mutex is used to lock access to both queues. Note that this lock
74 * must be separate from the lock on request queues to insure driver
75 * callbacks don't generate lock order reversals.
76 */
77 static TAILQ_HEAD(,cryptop) crp_ret_q; /* callback queues */
78 static TAILQ_HEAD(,cryptkop) crp_ret_kq;
79 static struct mtx crypto_ret_q_mtx;
80 #define CRYPTO_RETQ_LOCK() mtx_lock(&crypto_ret_q_mtx)
81 #define CRYPTO_RETQ_UNLOCK() mtx_unlock(&crypto_ret_q_mtx)
82 #define CRYPTO_RETQ_EMPTY() (TAILQ_EMPTY(&crp_ret_q) && TAILQ_EMPTY(&crp_ret_kq))
83
84 static uma_zone_t cryptop_zone;
85 static uma_zone_t cryptodesc_zone;
86
87 int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
88 SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
89 &crypto_userasymcrypto, 0,
90 "Enable/disable user-mode access to asymmetric crypto support");
91 int crypto_devallowsoft = 0; /* only use hardware crypto for asym */
92 SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
93 &crypto_devallowsoft, 0,
94 "Enable/disable use of software asym crypto support");
95
96 MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
97
98 static void crypto_proc(void);
99 static struct proc *cryptoproc;
100 static void crypto_ret_proc(void);
101 static struct proc *cryptoretproc;
102 static void crypto_destroy(void);
103 static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
104 static int crypto_kinvoke(struct cryptkop *krp);
105
106 static struct cryptostats cryptostats;
107 SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
108 cryptostats, "Crypto system statistics");
109
110 #ifdef CRYPTO_TIMING
111 static int crypto_timing = 0;
112 SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
113 &crypto_timing, 0, "Enable/disable crypto timing support");
114 #endif
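
/*
 * The statistics above are exported as an opaque struct; a userland
 * consumer can fetch them with sysctlbyname(3).  Hypothetical sketch
 * (the header path and error handling follow the usual userland
 * conventions and are not defined in this file).  The cs_invoke/cs_done
 * timing fields are only filled in when debug.crypto_timing is set.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <crypto/cryptodev.h>	/* userland install of opencrypto/cryptodev.h */
#include <err.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	struct cryptostats cs;
	size_t len = sizeof(cs);

	if (sysctlbyname("kern.crypto_stats", &cs, &len, NULL, 0) == -1)
		err(1, "kern.crypto_stats");
	printf("%ju sym ops (%ju errors), %ju asym ops (%ju errors)\n",
	    (uintmax_t)cs.cs_ops, (uintmax_t)cs.cs_errs,
	    (uintmax_t)cs.cs_kops, (uintmax_t)cs.cs_kerrs);
	return (0);
}
#endif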
115
116 static int
117 crypto_init(void)
118 {
119 int error;
120
121 mtx_init(&crypto_drivers_mtx, "crypto", "crypto driver table",
122 MTX_DEF|MTX_QUIET);
123
124 TAILQ_INIT(&crp_q);
125 TAILQ_INIT(&crp_kq);
126 mtx_init(&crypto_q_mtx, "crypto", "crypto op queues", MTX_DEF);
127
128 TAILQ_INIT(&crp_ret_q);
129 TAILQ_INIT(&crp_ret_kq);
130 mtx_init(&crypto_ret_q_mtx, "crypto", "crypto return queues", MTX_DEF);
131
132 cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
133 0, 0, 0, 0,
134 UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
135 cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
136 0, 0, 0, 0,
137 UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
138 if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
139 printf("crypto_init: cannot setup crypto zones\n");
140 error = ENOMEM;
141 goto bad;
142 }
143
144 crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
145 crypto_drivers = malloc(crypto_drivers_num *
146 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
147 if (crypto_drivers == NULL) {
148 printf("crypto_init: cannot setup crypto drivers\n");
149 error = ENOMEM;
150 goto bad;
151 }
152
153 error = kthread_create((void (*)(void *)) crypto_proc, NULL,
154 &cryptoproc, 0, 0, "crypto");
155 if (error) {
156 printf("crypto_init: cannot start crypto thread; error %d",
157 error);
158 goto bad;
159 }
160
161 error = kthread_create((void (*)(void *)) crypto_ret_proc, NULL,
162 &cryptoretproc, 0, 0, "crypto returns");
163 if (error) {
164 printf("crypto_init: cannot start cryptoret thread; error %d",
165 error);
166 goto bad;
167 }
168 return 0;
169 bad:
170 crypto_destroy();
171 return error;
172 }
173
174 /*
175 * Signal a crypto thread to terminate. We use the driver
176 * table lock to synchronize the sleep/wakeups so that we
177 * are sure the threads have terminated before we release
178 * the data structures they use. See crypto_finis below
179 * for the other half of this song-and-dance.
180 */
181 static void
182 crypto_terminate(struct proc **pp, void *q)
183 {
184 struct proc *p;
185
186 mtx_assert(&crypto_drivers_mtx, MA_OWNED);
187 p = *pp;
188 *pp = NULL;
189 if (p) {
190 wakeup_one(q);
191 PROC_LOCK(p); /* NB: insure we don't miss wakeup */
192 CRYPTO_DRIVER_UNLOCK(); /* let crypto_finis progress */
193 msleep(p, &p->p_mtx, PWAIT, "crypto_destroy", 0);
194 PROC_UNLOCK(p);
195 CRYPTO_DRIVER_LOCK();
196 }
197 }
198
199 static void
200 crypto_destroy(void)
201 {
202 /*
203 * Terminate any crypto threads.
204 */
205 CRYPTO_DRIVER_LOCK();
206 crypto_terminate(&cryptoproc, &crp_q);
207 crypto_terminate(&cryptoretproc, &crp_ret_q);
208 CRYPTO_DRIVER_UNLOCK();
209
210 /* XXX flush queues??? */
211
212 /*
213 * Reclaim dynamically allocated resources.
214 */
215 if (crypto_drivers != NULL)
216 free(crypto_drivers, M_CRYPTO_DATA);
217
218 if (cryptodesc_zone != NULL)
219 uma_zdestroy(cryptodesc_zone);
220 if (cryptop_zone != NULL)
221 uma_zdestroy(cryptop_zone);
222 mtx_destroy(&crypto_q_mtx);
223 mtx_destroy(&crypto_ret_q_mtx);
224 mtx_destroy(&crypto_drivers_mtx);
225 }
226
227 /*
228 * Initialization code, both for static and dynamic loading.
229 */
230 static int
231 crypto_modevent(module_t mod, int type, void *unused)
232 {
233 int error = EINVAL;
234
235 switch (type) {
236 case MOD_LOAD:
237 error = crypto_init();
238 if (error == 0 && bootverbose)
239 printf("crypto: <crypto core>\n");
240 break;
241 case MOD_UNLOAD:
242 		/* XXX disallow if active sessions */
243 error = 0;
244 crypto_destroy();
245 return 0;
246 }
247 return error;
248 }
249
250 static moduledata_t crypto_mod = {
251 "crypto",
252 crypto_modevent,
253 0
254 };
255 MODULE_VERSION(crypto, 1);
256 DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);
257 MODULE_DEPEND(crypto, zlib, 1, 1, 1);
258
259 /*
260 * Create a new session.
261 */
262 int
263 crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
264 {
265 struct cryptocap *cap = NULL;
266 struct cryptoini *cr;
267 u_int32_t hid = 0, lid;
268 int err = EINVAL;
269
270 CRYPTO_DRIVER_LOCK();
271
272 if (crypto_drivers == NULL)
273 goto done;
274
275 /*
276 * The algorithm we use here is pretty stupid; just use the
277 * first driver that supports all the algorithms we need.
278 *
279 * XXX We need more smarts here (in real life too, but that's
280 * XXX another story altogether).
281 */
282
283 /*
284 * First try to find hardware crypto.
285 */
286 if (hard >= 0) {
287 for (hid = 0; hid < crypto_drivers_num; hid++) {
288 cap = &crypto_drivers[hid];
289 /*
290 * If it's not initialized or has remaining sessions
291 * referencing it, skip.
292 */
293 if (cap->cc_newsession == NULL ||
294 (cap->cc_flags & CRYPTOCAP_F_CLEANUP))
295 continue;
296
297 /* Hardware required -- ignore software drivers. */
298 if (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)
299 continue;
300
301 /* See if all the algorithms are supported. */
302 for (cr = cri; cr; cr = cr->cri_next)
303 if (cap->cc_alg[cr->cri_alg] == 0)
304 break;
305 if (cr == NULL) {
306 /* Ok, all algorithms are supported. */
307 break;
308 }
309 }
310 if (hid == crypto_drivers_num)
311 cap = NULL;
312 }
313 /*
314 * If no hardware crypto, look for software crypto.
315 */
316 if (cap == NULL && hard <= 0) {
317 for (hid = 0; hid < crypto_drivers_num; hid++) {
318 cap = &crypto_drivers[hid];
319 /*
320 * If it's not initialized or has remaining sessions
321 * referencing it, skip.
322 */
323 if (cap->cc_newsession == NULL ||
324 (cap->cc_flags & CRYPTOCAP_F_CLEANUP))
325 continue;
326
327 /* Software required -- ignore hardware drivers. */
328 if (!(cap->cc_flags & CRYPTOCAP_F_SOFTWARE))
329 continue;
330
331 /* See if all the algorithms are supported. */
332 for (cr = cri; cr; cr = cr->cri_next)
333 if (cap->cc_alg[cr->cri_alg] == 0)
334 break;
335 if (cr == NULL) {
336 /* Ok, all algorithms are supported. */
337 break;
338 }
339 }
340 if (hid == crypto_drivers_num)
341 cap = NULL;
342 }
343
344 if (cap != NULL) {
345 /*
346 * Can't do everything in one session.
347 *
348 * XXX Fix this. We need to inject a "virtual" session layer right
349 * XXX about here.
350 */
351
352 /* Call the driver initialization routine. */
353 lid = hid; /* Pass the driver ID. */
354 err = (*cap->cc_newsession)(cap->cc_arg, &lid, cri);
355 if (err == 0) {
356 /* XXX assert (hid &~ 0xffffff) == 0 */
357 /* XXX assert (cap->cc_flags &~ 0xff) == 0 */
358 (*sid) = ((cap->cc_flags & 0xff) << 24) | hid;
359 (*sid) <<= 32;
360 (*sid) |= (lid & 0xffffffff);
361 cap->cc_sessions++;
362 }
363 }
364 done:
365 CRYPTO_DRIVER_UNLOCK();
366 return err;
367 }
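
/*
 * The 64-bit session id constructed above therefore has the layout
 * below; the CRYPTO_SESID2HID() and CRYPTO_SESID2CAPS() macros used
 * elsewhere in this file simply pick these fields back out.
 *
 *	bits 63..56	cap->cc_flags & 0xff	(capabilities, e.g. CRYPTOCAP_F_SYNC)
 *	bits 55..32	hid			(index into crypto_drivers[])
 *	bits 31..0	lid			(driver-local session id)
 *
 * A sketch of the hid extraction (roughly what CRYPTO_SESID2HID()
 * evaluates to):
 */
#if 0
static __inline u_int32_t
example_sid_to_hid(u_int64_t sid)
{
	return ((sid >> 32) & 0xffffff);
}
#endif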
368
369 static void
370 crypto_remove(struct cryptocap *cap)
371 {
372
373 mtx_assert(&crypto_drivers_mtx, MA_OWNED);
374 if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
375 bzero(cap, sizeof(*cap));
376 }
377
378 /*
379 * Delete an existing session (or a reserved session on an unregistered
380 * driver).
381 */
382 int
383 crypto_freesession(u_int64_t sid)
384 {
385 struct cryptocap *cap;
386 u_int32_t hid;
387 int err;
388
389 CRYPTO_DRIVER_LOCK();
390
391 if (crypto_drivers == NULL) {
392 err = EINVAL;
393 goto done;
394 }
395
396 	/* Determine the driver ID. */
397 hid = CRYPTO_SESID2HID(sid);
398
399 if (hid >= crypto_drivers_num) {
400 err = ENOENT;
401 goto done;
402 }
403 cap = &crypto_drivers[hid];
404
405 if (cap->cc_sessions)
406 cap->cc_sessions--;
407
408 /* Call the driver cleanup routine, if available. */
409 if (cap->cc_freesession)
410 err = cap->cc_freesession(cap->cc_arg, sid);
411 else
412 err = 0;
413
414 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
415 crypto_remove(cap);
416
417 done:
418 CRYPTO_DRIVER_UNLOCK();
419 return err;
420 }
421
422 /*
423 * Return an unused driver id. Used by drivers prior to registering
424 * support for the algorithms they handle.
425 */
426 int32_t
427 crypto_get_driverid(u_int32_t flags)
428 {
429 struct cryptocap *newdrv;
430 int i;
431
432 CRYPTO_DRIVER_LOCK();
433
434 for (i = 0; i < crypto_drivers_num; i++) {
435 if (crypto_drivers[i].cc_process == NULL &&
436 (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
437 break;
438 }
439 }
440
441 /* Out of entries, allocate some more. */
442 if (i == crypto_drivers_num) {
443 /* Be careful about wrap-around. */
444 if (2 * crypto_drivers_num <= crypto_drivers_num) {
445 CRYPTO_DRIVER_UNLOCK();
446 printf("crypto: driver count wraparound!\n");
447 return -1;
448 }
449
450 newdrv = malloc(2 * crypto_drivers_num *
451 sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
452 if (newdrv == NULL) {
453 CRYPTO_DRIVER_UNLOCK();
454 printf("crypto: no space to expand driver table!\n");
455 return -1;
456 }
457
458 bcopy(crypto_drivers, newdrv,
459 crypto_drivers_num * sizeof(struct cryptocap));
460
461 crypto_drivers_num *= 2;
462
463 free(crypto_drivers, M_CRYPTO_DATA);
464 crypto_drivers = newdrv;
465 }
466
467 /* NB: state is zero'd on free */
468 crypto_drivers[i].cc_sessions = 1; /* Mark */
469 crypto_drivers[i].cc_flags = flags;
470 if (bootverbose)
471 printf("crypto: assign driver %u, flags %u\n", i, flags);
472
473 CRYPTO_DRIVER_UNLOCK();
474
475 return i;
476 }
477
478 static struct cryptocap *
479 crypto_checkdriver(u_int32_t hid)
480 {
481 if (crypto_drivers == NULL)
482 return NULL;
483 return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
484 }
485
486 /*
487 * Register support for a key-related algorithm. This routine
488 * is called once for each algorithm supported by a driver.
489 */
490 int
491 crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
492 int (*kprocess)(void*, struct cryptkop *, int),
493 void *karg)
494 {
495 struct cryptocap *cap;
496 int err;
497
498 CRYPTO_DRIVER_LOCK();
499
500 cap = crypto_checkdriver(driverid);
501 if (cap != NULL &&
502 (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
503 /*
504 * XXX Do some performance testing to determine placing.
505 * XXX We probably need an auxiliary data structure that
506 * XXX describes relative performances.
507 */
508
509 cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
510 if (bootverbose)
511 printf("crypto: driver %u registers key alg %u flags %u\n"
512 , driverid
513 , kalg
514 , flags
515 );
516
517 if (cap->cc_kprocess == NULL) {
518 cap->cc_karg = karg;
519 cap->cc_kprocess = kprocess;
520 }
521 err = 0;
522 } else
523 err = EINVAL;
524
525 CRYPTO_DRIVER_UNLOCK();
526 return err;
527 }
528
529 /*
530 * Register support for a non-key-related algorithm. This routine
531 * is called once for each such algorithm supported by a driver.
532 */
533 int
534 crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
535 u_int32_t flags,
536 int (*newses)(void*, u_int32_t*, struct cryptoini*),
537 int (*freeses)(void*, u_int64_t),
538 int (*process)(void*, struct cryptop *, int),
539 void *arg)
540 {
541 struct cryptocap *cap;
542 int err;
543
544 CRYPTO_DRIVER_LOCK();
545
546 cap = crypto_checkdriver(driverid);
547 /* NB: algorithms are in the range [1..max] */
548 if (cap != NULL &&
549 (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
550 /*
551 * XXX Do some performance testing to determine placing.
552 * XXX We probably need an auxiliary data structure that
553 * XXX describes relative performances.
554 */
555
556 cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
557 cap->cc_max_op_len[alg] = maxoplen;
558 if (bootverbose)
559 printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
560 , driverid
561 , alg
562 , flags
563 , maxoplen
564 );
565
566 if (cap->cc_process == NULL) {
567 cap->cc_arg = arg;
568 cap->cc_newsession = newses;
569 cap->cc_process = process;
570 cap->cc_freesession = freeses;
571 cap->cc_sessions = 0; /* Unmark */
572 }
573 err = 0;
574 } else
575 err = EINVAL;
576
577 CRYPTO_DRIVER_UNLOCK();
578 return err;
579 }
580
581 /*
582 * Unregister a crypto driver. If there are pending sessions using it,
583 * leave enough information around so that subsequent calls using those
584 * sessions will correctly detect the driver has been unregistered and
585 * reroute requests.
586 */
587 int
588 crypto_unregister(u_int32_t driverid, int alg)
589 {
590 struct cryptocap *cap;
591 u_int32_t ses, kops;
592 int i, err;
593
594 CRYPTO_DRIVER_LOCK();
595
596 cap = crypto_checkdriver(driverid);
597 if (cap != NULL &&
598 (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
599 cap->cc_alg[alg] != 0) {
600 cap->cc_alg[alg] = 0;
601 cap->cc_max_op_len[alg] = 0;
602
603 /* Was this the last algorithm ? */
604 for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
605 if (cap->cc_alg[i] != 0)
606 break;
607
608 if (i == CRYPTO_ALGORITHM_MAX + 1) {
609 ses = cap->cc_sessions;
610 kops = cap->cc_koperations;
611 bzero(cap, sizeof(*cap));
612 if (ses != 0 || kops != 0) {
613 /*
614 * If there are pending sessions, just mark as invalid.
615 */
616 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
617 cap->cc_sessions = ses;
618 cap->cc_koperations = kops;
619 }
620 }
621 err = 0;
622 } else
623 err = EINVAL;
624
625 CRYPTO_DRIVER_UNLOCK();
626 return err;
627 }
628
629 /*
630 * Unregister all algorithms associated with a crypto driver.
631 * If there are pending sessions using it, leave enough information
632 * around so that subsequent calls using those sessions will
633 * correctly detect the driver has been unregistered and reroute
634 * requests.
635 */
636 int
637 crypto_unregister_all(u_int32_t driverid)
638 {
639 struct cryptocap *cap;
640 u_int32_t ses, kops;
641 int i, err;
642
643 CRYPTO_DRIVER_LOCK();
644
645 cap = crypto_checkdriver(driverid);
646 if (cap != NULL) {
647 for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
648 cap->cc_alg[i] = 0;
649 cap->cc_max_op_len[i] = 0;
650 }
651 ses = cap->cc_sessions;
652 kops = cap->cc_koperations;
653 bzero(cap, sizeof(*cap));
654 if (ses != 0 || kops != 0) {
655 /*
656 * If there are pending sessions, just mark as invalid.
657 */
658 cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
659 cap->cc_sessions = ses;
660 cap->cc_koperations = kops;
661 }
662 err = 0;
663 } else
664 err = EINVAL;
665
666 CRYPTO_DRIVER_UNLOCK();
667 return err;
668 }
669
670 /*
671 * Clear blockage on a driver. The what parameter indicates whether
672 * the driver is now ready for cryptop's and/or cryptokop's.
673 */
674 int
675 crypto_unblock(u_int32_t driverid, int what)
676 {
677 struct cryptocap *cap;
678 int err;
679
680 CRYPTO_Q_LOCK();
681 cap = crypto_checkdriver(driverid);
682 if (cap != NULL) {
683 if (what & CRYPTO_SYMQ)
684 cap->cc_qblocked = 0;
685 if (what & CRYPTO_ASYMQ)
686 cap->cc_kqblocked = 0;
687 if (crp_sleep)
688 wakeup_one(&crp_q);
689 err = 0;
690 } else
691 err = EINVAL;
692 CRYPTO_Q_UNLOCK();
693
694 return err;
695 }
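
/*
 * Illustrative sketch (hypothetical driver): when a driver's process
 * routine returns ERESTART the dispatch code below marks it blocked;
 * once resources free up (typically in its interrupt handler) the
 * driver re-enables dispatch with crypto_unblock().
 */
#if 0
static void
mydrv_intr(void *arg)
{
	struct mydrv_softc *sc = arg;

	/* ... reap completed requests, release descriptors ... */

	if (sc->sc_needwakeup) {
		sc->sc_needwakeup = 0;
		crypto_unblock(sc->sc_cid, CRYPTO_SYMQ | CRYPTO_ASYMQ);
	}
}
#endif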
696
697 /*
698 * Add a crypto request to a queue, to be processed by the kernel thread.
699 */
700 int
701 crypto_dispatch(struct cryptop *crp)
702 {
703 struct cryptocap *cap;
704 u_int32_t hid;
705 int result;
706
707 cryptostats.cs_ops++;
708
709 #ifdef CRYPTO_TIMING
710 if (crypto_timing)
711 binuptime(&crp->crp_tstamp);
712 #endif
713
714 hid = CRYPTO_SESID2HID(crp->crp_sid);
715
716 if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
717 /*
718 * Caller marked the request to be processed
719 * immediately; dispatch it directly to the
720 * driver unless the driver is currently blocked.
721 */
722 cap = crypto_checkdriver(hid);
723 		/* A driver cannot disappear while there is an active session. */
724 KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
725 if (!cap->cc_qblocked) {
726 result = crypto_invoke(cap, crp, 0);
727 if (result != ERESTART)
728 return (result);
729 /*
730 * The driver ran out of resources, put the request on
731 * the queue.
732 */
733 }
734 }
735 CRYPTO_Q_LOCK();
736 TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
737 if (crp_sleep)
738 wakeup_one(&crp_q);
739 CRYPTO_Q_UNLOCK();
740 return 0;
741 }
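
/*
 * Illustrative sketch of the submission path from a consumer's point of
 * view (hypothetical names; IPsec and /dev/crypto are the in-tree
 * consumers).  A request is allocated with crypto_getreq(), described
 * by one cryptodesc per operation and handed to crypto_dispatch();
 * completion is reported through the callback, which must clear the
 * DONE flag and resubmit if the session was migrated (EAGAIN, see
 * crypto_invoke() below).
 */
#if 0
static int my_encrypt_done(struct cryptop *crp);

static int
my_encrypt(u_int64_t sid, caddr_t buf, int len, caddr_t key, int klen_bits)
{
	struct cryptop *crp;
	struct cryptodesc *crd;

	crp = crypto_getreq(1);			/* one descriptor */
	if (crp == NULL)
		return (ENOMEM);

	crd = crp->crp_desc;
	crd->crd_alg = CRYPTO_3DES_CBC;
	crd->crd_skip = 0;			/* transform the whole buffer */
	crd->crd_len = len;
	crd->crd_inject = 0;			/* where the IV lives in the buffer */
	crd->crd_flags = CRD_F_ENCRYPT;
	crd->crd_key = key;
	crd->crd_klen = klen_bits;		/* key length in bits */

	crp->crp_sid = sid;			/* from crypto_newsession() */
	crp->crp_ilen = len;
	crp->crp_flags = 0;			/* contiguous buf; no CRYPTO_F_BATCH,
						   so try to dispatch immediately */
	crp->crp_buf = buf;
	crp->crp_callback = my_encrypt_done;

	return (crypto_dispatch(crp));
}

static int
my_encrypt_done(struct cryptop *crp)
{
	if (crp->crp_etype == EAGAIN) {		/* session migrated; resubmit */
		crp->crp_etype = 0;
		crp->crp_flags &= ~CRYPTO_F_DONE;
		return (crypto_dispatch(crp));
	}
	/* ... consume the result, then release the request ... */
	crypto_freereq(crp);
	return (0);
}
#endif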
742
743 /*
744 * Add an asymmetric crypto request to a queue,
745 * to be processed by the kernel thread.
746 */
747 int
748 crypto_kdispatch(struct cryptkop *krp)
749 {
750 int result;
751
752 cryptostats.cs_kops++;
753
754 result = crypto_kinvoke(krp);
755 if (result != ERESTART)
756 return (result);
757 CRYPTO_Q_LOCK();
758 TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
759 if (crp_sleep)
760 wakeup_one(&crp_q);
761 CRYPTO_Q_UNLOCK();
762
763 return 0;
764 }
765
766 /*
767 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
768 */
769 static int
770 crypto_kinvoke(struct cryptkop *krp)
771 {
772 struct cryptocap *cap = NULL;
773 u_int32_t hid;
774 int error = 0;
775
776 KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
777 KASSERT(krp->krp_callback != NULL,
778 ("%s: krp->crp_callback == NULL", __func__));
779
780 CRYPTO_DRIVER_LOCK();
781 for (hid = 0; hid < crypto_drivers_num; hid++) {
782 cap = &crypto_drivers[hid];
783 if (cap == NULL)
784 continue;
785 if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
786 !crypto_devallowsoft) {
787 continue;
788 }
789 if (cap->cc_kprocess == NULL)
790 continue;
791 if (!(cap->cc_kalg[krp->krp_op] & CRYPTO_ALG_FLAG_SUPPORTED))
792 continue;
793 if (cap->cc_kqblocked) {
794 error = ERESTART;
795 continue;
796 }
797 error = 0;
798 break;
799 }
800 krp->krp_hid = hid;
801 if (hid < crypto_drivers_num) {
802 cap->cc_koperations++;
803 CRYPTO_DRIVER_UNLOCK();
804 error = cap->cc_kprocess(cap->cc_karg, krp, 0);
805 CRYPTO_DRIVER_LOCK();
806 if (error == ERESTART) {
807 cap->cc_koperations--;
808 CRYPTO_DRIVER_UNLOCK();
809 return (error);
810 }
811 } else {
812 error = ENODEV;
813 }
814 CRYPTO_DRIVER_UNLOCK();
815
816 if (error) {
817 krp->krp_status = error;
818 crypto_kdone(krp);
819 }
820 return 0;
821 }
822
823 #ifdef CRYPTO_TIMING
824 static void
825 crypto_tstat(struct cryptotstat *ts, struct bintime *bt)
826 {
827 struct bintime now, delta;
828 struct timespec t;
829 uint64_t u;
830
831 binuptime(&now);
832 u = now.frac;
833 delta.frac = now.frac - bt->frac;
834 delta.sec = now.sec - bt->sec;
835 if (u < delta.frac)
836 delta.sec--;
837 bintime2timespec(&delta, &t);
838 timespecadd(&ts->acc, &t);
839 if (timespeccmp(&t, &ts->min, <))
840 ts->min = t;
841 if (timespeccmp(&t, &ts->max, >))
842 ts->max = t;
843 ts->count++;
844
845 *bt = now;
846 }
847 #endif
848
849 /*
850 * Dispatch a crypto request to the appropriate crypto devices.
851 */
852 static int
853 crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint)
854 {
855
856 KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
857 KASSERT(crp->crp_callback != NULL,
858 ("%s: crp->crp_callback == NULL", __func__));
859 KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
860
861 #ifdef CRYPTO_TIMING
862 if (crypto_timing)
863 crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
864 #endif
865 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
866 struct cryptodesc *crd;
867 u_int64_t nid;
868
869 /*
870 * Driver has unregistered; migrate the session and return
871 * an error to the caller so they'll resubmit the op.
872 *
873 * XXX: What if there are more already queued requests for this
874 * session?
875 */
876 crypto_freesession(crp->crp_sid);
877
878 for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
879 crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
880
881 if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
882 crp->crp_sid = nid;
883
884 crp->crp_etype = EAGAIN;
885 crypto_done(crp);
886 return 0;
887 } else {
888 /*
889 * Invoke the driver to process the request.
890 */
891 return cap->cc_process(cap->cc_arg, crp, hint);
892 }
893 }
894
895 /*
896 * Release a set of crypto descriptors.
897 */
898 void
899 crypto_freereq(struct cryptop *crp)
900 {
901 struct cryptodesc *crd;
902
903 if (crp == NULL)
904 return;
905
906 #ifdef DIAGNOSTIC
907 {
908 struct cryptop *crp2;
909
910 CRYPTO_Q_LOCK();
911 TAILQ_FOREACH(crp2, &crp_q, crp_next) {
912 KASSERT(crp2 != crp,
913 ("Freeing cryptop from the crypto queue (%p).",
914 crp));
915 }
916 CRYPTO_Q_UNLOCK();
917 CRYPTO_RETQ_LOCK();
918 TAILQ_FOREACH(crp2, &crp_ret_q, crp_next) {
919 KASSERT(crp2 != crp,
920 ("Freeing cryptop from the return queue (%p).",
921 crp));
922 }
923 CRYPTO_RETQ_UNLOCK();
924 }
925 #endif
926
927 while ((crd = crp->crp_desc) != NULL) {
928 crp->crp_desc = crd->crd_next;
929 uma_zfree(cryptodesc_zone, crd);
930 }
931
932 uma_zfree(cryptop_zone, crp);
933 }
934
935 /*
936 * Acquire a set of crypto descriptors.
937 */
938 struct cryptop *
939 crypto_getreq(int num)
940 {
941 struct cryptodesc *crd;
942 struct cryptop *crp;
943
944 crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
945 if (crp != NULL) {
946 while (num--) {
947 crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
948 if (crd == NULL) {
949 crypto_freereq(crp);
950 return NULL;
951 }
952
953 crd->crd_next = crp->crp_desc;
954 crp->crp_desc = crd;
955 }
956 }
957 return crp;
958 }
959
960 /*
961 * Invoke the callback on behalf of the driver.
962 */
963 void
964 crypto_done(struct cryptop *crp)
965 {
966 KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
967 ("crypto_done: op already done, flags 0x%x", crp->crp_flags));
968 crp->crp_flags |= CRYPTO_F_DONE;
969 if (crp->crp_etype != 0)
970 cryptostats.cs_errs++;
971 #ifdef CRYPTO_TIMING
972 if (crypto_timing)
973 crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
974 #endif
975 /*
976 * CBIMM means unconditionally do the callback immediately;
977 * CBIFSYNC means do the callback immediately only if the
978 * operation was done synchronously. Both are used to avoid
979 * doing extraneous context switches; the latter is mostly
980 * used with the software crypto driver.
981 */
982 if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
983 ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
984 (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
985 /*
986 * Do the callback directly. This is ok when the
987 * callback routine does very little (e.g. the
988 * /dev/crypto callback method just does a wakeup).
989 */
990 #ifdef CRYPTO_TIMING
991 if (crypto_timing) {
992 /*
993 * NB: We must copy the timestamp before
994 * doing the callback as the cryptop is
995 * likely to be reclaimed.
996 */
997 struct bintime t = crp->crp_tstamp;
998 crypto_tstat(&cryptostats.cs_cb, &t);
999 crp->crp_callback(crp);
1000 crypto_tstat(&cryptostats.cs_finis, &t);
1001 } else
1002 #endif
1003 crp->crp_callback(crp);
1004 } else {
1005 /*
1006 * Normal case; queue the callback for the thread.
1007 */
1008 CRYPTO_RETQ_LOCK();
1009 if (CRYPTO_RETQ_EMPTY())
1010 wakeup_one(&crp_ret_q); /* shared wait channel */
1011 TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
1012 CRYPTO_RETQ_UNLOCK();
1013 }
1014 }
1015
1016 /*
1017 * Invoke the callback on behalf of the driver.
1018 */
1019 void
1020 crypto_kdone(struct cryptkop *krp)
1021 {
1022 struct cryptocap *cap;
1023
1024 if (krp->krp_status != 0)
1025 cryptostats.cs_kerrs++;
1026 CRYPTO_DRIVER_LOCK();
1027 /* XXX: What if driver is loaded in the meantime? */
1028 if (krp->krp_hid < crypto_drivers_num) {
1029 cap = &crypto_drivers[krp->krp_hid];
1030 cap->cc_koperations--;
1031 KASSERT(cap->cc_koperations >= 0, ("cc_koperations < 0"));
1032 if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
1033 crypto_remove(cap);
1034 }
1035 CRYPTO_DRIVER_UNLOCK();
1036 CRYPTO_RETQ_LOCK();
1037 if (CRYPTO_RETQ_EMPTY())
1038 wakeup_one(&crp_ret_q); /* shared wait channel */
1039 TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
1040 CRYPTO_RETQ_UNLOCK();
1041 }
1042
1043 int
1044 crypto_getfeat(int *featp)
1045 {
1046 int hid, kalg, feat = 0;
1047
1048 if (!crypto_userasymcrypto)
1049 goto out;
1050
1051 CRYPTO_DRIVER_LOCK();
1052 for (hid = 0; hid < crypto_drivers_num; hid++) {
1053 if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
1054 !crypto_devallowsoft) {
1055 continue;
1056 }
1057 if (crypto_drivers[hid].cc_kprocess == NULL)
1058 continue;
1059 for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
1060 if ((crypto_drivers[hid].cc_kalg[kalg] &
1061 CRYPTO_ALG_FLAG_SUPPORTED) != 0)
1062 feat |= 1 << kalg;
1063 }
1064 CRYPTO_DRIVER_UNLOCK();
1065 out:
1066 *featp = feat;
1067 return (0);
1068 }
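
/*
 * The mask assembled here is what cryptodev(4) returns to userland for
 * the CIOCASYMFEAT ioctl.  Hypothetical userland probe (the CRIOGET
 * dance follows the usual /dev/crypto conventions, not anything defined
 * in this file):
 */
#if 0
#include <sys/ioctl.h>
#include <crypto/cryptodev.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
	int fd, cfd;
	u_int32_t feat;

	fd = open("/dev/crypto", O_RDWR);
	if (fd < 0 || ioctl(fd, CRIOGET, &cfd) < 0)
		return (1);
	if (ioctl(cfd, CIOCASYMFEAT, &feat) < 0)
		return (1);
	if (feat & CRF_MOD_EXP)
		printf("asymmetric offload: modular exponentiation\n");
	return (0);
}
#endif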
1069
1070 /*
1071 * Terminate a thread at module unload. The process that
1072 * initiated this is waiting for us to signal that we're gone;
1073 * wake it up and exit. We use the driver table lock to insure
1074 * we don't do the wakeup before they're waiting. There is no
1075 * race here because the waiter sleeps on the proc lock for the
1076 * thread so it gets notified at the right time because of an
1077 * extra wakeup that's done in exit1().
1078 */
1079 static void
1080 crypto_finis(void *chan)
1081 {
1082 CRYPTO_DRIVER_LOCK();
1083 wakeup_one(chan);
1084 CRYPTO_DRIVER_UNLOCK();
1085 kthread_exit(0);
1086 }
1087
1088 /*
1089 * Crypto thread, dispatches crypto requests.
1090 */
1091 static void
1092 crypto_proc(void)
1093 {
1094 struct cryptop *crp, *submit;
1095 struct cryptkop *krp;
1096 struct cryptocap *cap;
1097 u_int32_t hid;
1098 int result, hint;
1099
1100 CRYPTO_Q_LOCK();
1101 for (;;) {
1102 /*
1103 * Find the first element in the queue that can be
1104 * processed and look-ahead to see if multiple ops
1105 * are ready for the same driver.
1106 */
1107 submit = NULL;
1108 hint = 0;
1109 TAILQ_FOREACH(crp, &crp_q, crp_next) {
1110 hid = CRYPTO_SESID2HID(crp->crp_sid);
1111 cap = crypto_checkdriver(hid);
1112 /*
1113 			 * A driver cannot disappear while there is an active
1114 * session.
1115 */
1116 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1117 __func__, __LINE__));
1118 if (cap == NULL || cap->cc_process == NULL) {
1119 /* Op needs to be migrated, process it. */
1120 if (submit == NULL)
1121 submit = crp;
1122 break;
1123 }
1124 if (!cap->cc_qblocked) {
1125 if (submit != NULL) {
1126 /*
1127 * We stop on finding another op,
1128 				 * regardless of whether it's for the same
1129 * driver or not. We could keep
1130 * searching the queue but it might be
1131 * better to just use a per-driver
1132 * queue instead.
1133 */
1134 if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
1135 hint = CRYPTO_HINT_MORE;
1136 break;
1137 } else {
1138 submit = crp;
1139 if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
1140 break;
1141 					/* keep scanning in case more are queued */
1142 }
1143 }
1144 }
1145 if (submit != NULL) {
1146 TAILQ_REMOVE(&crp_q, submit, crp_next);
1147 hid = CRYPTO_SESID2HID(submit->crp_sid);
1148 cap = crypto_checkdriver(hid);
1149 KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
1150 __func__, __LINE__));
1151 result = crypto_invoke(cap, submit, hint);
1152 if (result == ERESTART) {
1153 /*
1154 * The driver ran out of resources, mark the
1155 * driver ``blocked'' for cryptop's and put
1156 				 * the request back in the queue.  It would be
1157 * best to put the request back where we got
1158 * it but that's hard so for now we put it
1159 * at the front. This should be ok; putting
1160 * it at the end does not work.
1161 */
1162 /* XXX validate sid again? */
1163 crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
1164 TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
1165 cryptostats.cs_blocks++;
1166 }
1167 }
1168
1169 /* As above, but for key ops */
1170 TAILQ_FOREACH(krp, &crp_kq, krp_next) {
1171 cap = crypto_checkdriver(krp->krp_hid);
1172 if (cap == NULL || cap->cc_kprocess == NULL) {
1173 /* Op needs to be migrated, process it. */
1174 break;
1175 }
1176 if (!cap->cc_kqblocked)
1177 break;
1178 }
1179 if (krp != NULL) {
1180 TAILQ_REMOVE(&crp_kq, krp, krp_next);
1181 result = crypto_kinvoke(krp);
1182 if (result == ERESTART) {
1183 /*
1184 * The driver ran out of resources, mark the
1185 * driver ``blocked'' for cryptkop's and put
1186 				 * the request back in the queue.  It would be
1187 * best to put the request back where we got
1188 * it but that's hard so for now we put it
1189 * at the front. This should be ok; putting
1190 * it at the end does not work.
1191 */
1192 /* XXX validate sid again? */
1193 crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
1194 TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
1195 cryptostats.cs_kblocks++;
1196 }
1197 }
1198
1199 if (submit == NULL && krp == NULL) {
1200 /*
1201 * Nothing more to be processed. Sleep until we're
1202 * woken because there are more ops to process.
1203 * This happens either by submission or by a driver
1204 * becoming unblocked and notifying us through
1205 * crypto_unblock. Note that when we wakeup we
1206 * start processing each queue again from the
1207 * front. It's not clear that it's important to
1208 * preserve this ordering since ops may finish
1209 * out of order if dispatched to different devices
1210 * and some become blocked while others do not.
1211 */
1212 crp_sleep = 1;
1213 msleep(&crp_q, &crypto_q_mtx, PWAIT, "crypto_wait", 0);
1214 crp_sleep = 0;
1215 if (cryptoproc == NULL)
1216 break;
1217 cryptostats.cs_intrs++;
1218 }
1219 }
1220 CRYPTO_Q_UNLOCK();
1221
1222 crypto_finis(&crp_q);
1223 }
1224
1225 /*
1226 * Crypto returns thread, does callbacks for processed crypto requests.
1227 * Callbacks are done here, rather than in the crypto drivers, because
1228 * callbacks typically are expensive and would slow interrupt handling.
1229 */
1230 static void
1231 crypto_ret_proc(void)
1232 {
1233 struct cryptop *crpt;
1234 struct cryptkop *krpt;
1235
1236 CRYPTO_RETQ_LOCK();
1237 for (;;) {
1238 /* Harvest return q's for completed ops */
1239 crpt = TAILQ_FIRST(&crp_ret_q);
1240 if (crpt != NULL)
1241 TAILQ_REMOVE(&crp_ret_q, crpt, crp_next);
1242
1243 krpt = TAILQ_FIRST(&crp_ret_kq);
1244 if (krpt != NULL)
1245 TAILQ_REMOVE(&crp_ret_kq, krpt, krp_next);
1246
1247 if (crpt != NULL || krpt != NULL) {
1248 CRYPTO_RETQ_UNLOCK();
1249 /*
1250 * Run callbacks unlocked.
1251 */
1252 if (crpt != NULL) {
1253 #ifdef CRYPTO_TIMING
1254 if (crypto_timing) {
1255 /*
1256 * NB: We must copy the timestamp before
1257 * doing the callback as the cryptop is
1258 * likely to be reclaimed.
1259 */
1260 struct bintime t = crpt->crp_tstamp;
1261 crypto_tstat(&cryptostats.cs_cb, &t);
1262 crpt->crp_callback(crpt);
1263 crypto_tstat(&cryptostats.cs_finis, &t);
1264 } else
1265 #endif
1266 crpt->crp_callback(crpt);
1267 }
1268 if (krpt != NULL)
1269 krpt->krp_callback(krpt);
1270 CRYPTO_RETQ_LOCK();
1271 } else {
1272 /*
1273 * Nothing more to be processed. Sleep until we're
1274 * woken because there are more returns to process.
1275 */
1276 msleep(&crp_ret_q, &crypto_ret_q_mtx, PWAIT,
1277 "crypto_ret_wait", 0);
1278 if (cryptoretproc == NULL)
1279 break;
1280 cryptostats.cs_rets++;
1281 }
1282 }
1283 CRYPTO_RETQ_UNLOCK();
1284
1285 crypto_finis(&crp_ret_q);
1286 }