/* $FreeBSD$ */
/*	$OpenBSD: crypto.c,v 1.38 2002/06/11 11:14:29 beck Exp $	*/
/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#define	CRYPTO_TIMING			/* enable cryptop timing stuff */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/eventhandler.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/sysctl.h>

#include <sys/interrupt.h>
#include <machine/ipl.h>

#include <vm/vm_zone.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */
/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
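/*
 * A typical driver bring-up looks roughly like the sketch below.
 * NB: this is a hedged example, not code from this file; the xyz_*
 * entry points and the softc pointer sc are hypothetical, and a real
 * driver registers every algorithm it supports and checks the return
 * values:
 *
 *	int32_t id = crypto_get_driverid(0);
 *	if (id >= 0)
 *		crypto_register(id, CRYPTO_DES_CBC, 0, 0,
 *		    xyz_newsession, xyz_freesession, xyz_process, sc);
 */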
static struct cryptocap *crypto_drivers = NULL;
static int crypto_drivers_num = 0;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
static TAILQ_HEAD(,cryptop) crp_q;		/* request queues */
static TAILQ_HEAD(,cryptkop) crp_kq;

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
static TAILQ_HEAD(,cryptop) crp_ret_q;		/* callback queues */
static TAILQ_HEAD(,cryptkop) crp_ret_kq;

/*
 * Crypto op and descriptor data structures are allocated
 * from separate private zones.
 */
static vm_zone_t cryptop_zone;
static vm_zone_t cryptodesc_zone;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
SYSCTL_INT(_kern, OID_AUTO, usercrypto, CTLFLAG_RW,
	   &crypto_usercrypto, 0,
	   "Enable/disable user-mode access to crypto support");
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
	   &crypto_userasymcrypto, 0,
	   "Enable/disable user-mode access to asymmetric crypto support");
int	crypto_devallowsoft = 0;	/* only use hardware crypto for asym */
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
	   &crypto_devallowsoft, 0,
	   "Enable/disable use of software asym crypto support");

MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * splnet (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) which call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQs as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at splimp.
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
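/*
 * In short, a request flows through the system roughly as follows
 * (a sketch of the paths described above, not additional machinery):
 *
 *	crypto_dispatch()
 *	    direct:  crypto_invoke() -> driver cc_process()
 *	    batched: crp_q -> cryptointr() -> crypto_invoke()
 *	driver completes -> crypto_done()
 *	    CBIMM/CBIFSYNC: callback invoked immediately
 *	    otherwise:      crp_ret_q -> cryptoret() -> crp_callback()
 */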
static	void cryptointr(void);		/* swi thread to dispatch ops */
static	void cryptoret(void);		/* kernel thread for callbacks */
static	struct proc *cryptoproc;
static	void crypto_destroy(void);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

static	struct cryptostats cryptostats;
SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
	    cryptostats, "Crypto system statistics");

#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
SYSCTL_INT(_debug, OID_AUTO, crypto_timing, CTLFLAG_RW,
	   &crypto_timing, 0, "Enable/disable crypto timing support");
#endif

static int
crypto_init(void)
{
	int error;

	cryptop_zone = zinit("cryptop", sizeof (struct cryptop), 0, 0, 1);
	cryptodesc_zone = zinit("cryptodesc", sizeof (struct cryptodesc),
				0, 0, 1);
	if (cryptodesc_zone == NULL || cryptop_zone == NULL) {
		printf("crypto_init: cannot setup crypto zones\n");
		return ENOMEM;
	}

	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
	crypto_drivers = malloc(crypto_drivers_num *
	    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
	if (crypto_drivers == NULL) {
		printf("crypto_init: cannot malloc driver table\n");
		return ENOMEM;
	}

	TAILQ_INIT(&crp_q);
	TAILQ_INIT(&crp_kq);

	TAILQ_INIT(&crp_ret_q);
	TAILQ_INIT(&crp_ret_kq);

	register_swi(SWI_CRYPTO, cryptointr);
	error = kthread_create((void (*)(void *)) cryptoret, NULL,
		    &cryptoproc, "cryptoret");
	if (error) {
		printf("crypto_init: cannot start cryptoret thread; error %d\n",
			error);
		crypto_destroy();
	}
	return error;
}

static void
crypto_destroy(void)
{
	/* XXX no way to reclaim zones */
	if (crypto_drivers != NULL)
		free(crypto_drivers, M_CRYPTO_DATA);
	unregister_swi(SWI_CRYPTO, cryptointr);
}

/*
 * Initialization code, both for static and dynamic loading.
 */
static int
crypto_modevent(module_t mod, int type, void *unused)
{
	int error = EINVAL;

	switch (type) {
	case MOD_LOAD:
		error = crypto_init();
		if (error == 0 && bootverbose)
			printf("crypto: <crypto core>\n");
		break;
	case MOD_UNLOAD:
		/*XXX disallow if active sessions */
		error = 0;
		crypto_destroy();
		break;
	}
	return error;
}

static moduledata_t crypto_mod = {
	"crypto",
	crypto_modevent,
	0
};
MODULE_VERSION(crypto, 1);
DECLARE_MODULE(crypto, crypto_mod, SI_SUB_DRIVERS, SI_ORDER_FIRST);

/*
 * Create a new session.
 */
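/*
 * Example caller (a hedged sketch, not code from this file): chain two
 * cryptoini's, one cipher and one MAC, and let any driver take the
 * session (hard == 0).  The key buffers des_key and mac_key are
 * hypothetical; cri_klen is in bits:
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *	int error;
 *
 *	bzero(&crie, sizeof (crie));
 *	crie.cri_alg = CRYPTO_DES_CBC;
 *	crie.cri_klen = 64;
 *	crie.cri_key = des_key;
 *	crie.cri_next = &cria;
 *
 *	bzero(&cria, sizeof (cria));
 *	cria.cri_alg = CRYPTO_MD5_HMAC;
 *	cria.cri_klen = 128;
 *	cria.cri_key = mac_key;
 *
 *	error = crypto_newsession(&sid, &crie, 0);
 */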
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptoini *cr;
	u_int32_t hid, lid;
	int err = EINVAL;
	int s;

	s = splcrypto();

	if (crypto_drivers == NULL)
		goto done;

	/*
	 * The algorithm we use here is pretty stupid; just use the
	 * first driver that supports all the algorithms we need.
	 *
	 * XXX We need more smarts here (in real life too, but that's
	 * XXX another story altogether).
	 */

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		struct cryptocap *cap = &crypto_drivers[hid];
		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP))
			continue;

		/* Hardware required -- ignore software drivers. */
		if (hard > 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE))
			continue;
		/* Software required -- ignore hardware drivers. */
		if (hard < 0 && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0)
			continue;

		/* See if all the algorithms are supported. */
		for (cr = cri; cr; cr = cr->cri_next)
			if (cap->cc_alg[cr->cri_alg] == 0)
				break;

		if (cr == NULL) {
			/* Ok, all algorithms are supported. */

			/*
			 * Can't do everything in one session.
			 *
			 * XXX Fix this. We need to inject a "virtual" session layer right
			 * XXX about here.
			 */

			/* Call the driver initialization routine. */
			lid = hid;		/* Pass the driver ID. */
			err = (*cap->cc_newsession)(cap->cc_arg, &lid, cri);
			if (err == 0) {
				/* XXX assert (hid &~ 0xffffff) == 0 */
				/* XXX assert (cap->cc_flags &~ 0xff) == 0 */
				(*sid) = ((cap->cc_flags & 0xff) << 24) | hid;
				(*sid) <<= 32;
				(*sid) |= (lid & 0xffffffff);
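				/*
				 * Resulting session id layout (a sketch
				 * of the packing done just above):
				 *
				 *  63      56 55        32 31         0
				 * +----------+------------+-----------+
				 * | cc_flags |    hid     |    lid    |
				 * +----------+------------+-----------+
				 *
				 * e.g. flags 0x01, hid 2, lid 7 yields
				 * (((0x01 << 24) | 2) << 32) | 7
				 *   == 0x0100000200000007.
				 */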
				cap->cc_sessions++;
			}
			break;
		}
	}
done:
	splx(s);
	return err;
}

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
	u_int32_t hid;
	int err, s;

	s = splcrypto();

	if (crypto_drivers == NULL) {
		err = EINVAL;
		goto done;
	}

	/* Extract the driver (hardware) ID from the session ID. */
	hid = CRYPTO_SESID2HID(sid);

	if (hid >= crypto_drivers_num) {
		err = ENOENT;
		goto done;
	}

	if (crypto_drivers[hid].cc_sessions)
		crypto_drivers[hid].cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (crypto_drivers[hid].cc_freesession)
		err = crypto_drivers[hid].cc_freesession(
				crypto_drivers[hid].cc_arg, sid);
	else
		err = 0;

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
	    crypto_drivers[hid].cc_sessions == 0)
		bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

done:
	splx(s);
	return err;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	int i, s;

	s = splcrypto();
	for (i = 0; i < crypto_drivers_num; i++)
		if (crypto_drivers[i].cc_process == NULL &&
		    (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0 &&
		    crypto_drivers[i].cc_sessions == 0)
			break;

	/* Out of entries, allocate some more. */
	if (i == crypto_drivers_num) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			splx(s);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = malloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (newdrv == NULL) {
			splx(s);
			printf("crypto: no space to expand driver table!\n");
			return -1;
		}

		bcopy(crypto_drivers, newdrv,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;

		free(crypto_drivers, M_CRYPTO_DATA);
		crypto_drivers = newdrv;
	}

	/* NB: state is zero'd on free */
	crypto_drivers[i].cc_sessions = 1;	/* Mark */
	crypto_drivers[i].cc_flags = flags;
	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	splx(s);

	return i;
}

static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
	if (crypto_drivers == NULL)
		return NULL;
	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void*, struct cryptkop *, int),
    void *karg)
{
	int s;
	struct cryptocap *cap;
	int err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: CRK_ALGORITM_MIN is spelled this way in cryptodev.h */
	if (cap != NULL &&
	    (CRK_ALGORITM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose)
			printf("crypto: driver %u registers key alg %u flags %u\n"
				, driverid
				, kalg
				, flags
			);

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void*, u_int32_t*, struct cryptoini*),
    int (*freeses)(void*, u_int64_t),
    int (*process)(void*, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int s, err;

	s = splcrypto();

	cap = crypto_checkdriver(driverid);
	/* NB: algorithms are in the range [1..max] */
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose)
			printf("crypto: driver %u registers alg %u flags %u maxoplen %u\n"
				, driverid
				, alg
				, flags
				, maxoplen
			);

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int i, err, s = splcrypto();
	u_int32_t ses;
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (cap != NULL &&
	    (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
	    cap->cc_alg[alg] != 0) {
		cap->cc_alg[alg] = 0;
		cap->cc_max_op_len[alg] = 0;

		/* Was this the last algorithm ? */
		for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0)
				break;

		if (i == CRYPTO_ALGORITHM_MAX + 1) {
			ses = cap->cc_sessions;
			bzero(cap, sizeof(struct cryptocap));
			if (ses != 0) {
				/*
				 * If there are pending sessions, just mark as invalid.
				 */
				cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
				cap->cc_sessions = ses;
			}
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int i, err, s = splcrypto();
	u_int32_t ses;
	struct cryptocap *cap;

	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
			cap->cc_alg[i] = 0;
			cap->cc_max_op_len[i] = 0;
		}
		ses = cap->cc_sessions;
		bzero(cap, sizeof(struct cryptocap));
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
		err = 0;
	} else
		err = EINVAL;

	splx(s);
	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptkop's.
 */
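/*
 * Driver-side sketch of the blocking protocol (hedged: xyz_process and
 * out_of_resources are hypothetical; the framework marks the queue
 * blocked when ERESTART is returned, see crypto_dispatch below):
 *
 *	static int
 *	xyz_process(void *arg, struct cryptop *crp, int hint)
 *	{
 *		if (out_of_resources)
 *			return ERESTART;	(op is requeued by caller)
 *		... start the operation ...
 *		return 0;
 *	}
 *
 * and later, when resources free up (e.g. from the completion
 * interrupt):
 *
 *	crypto_unblock(driverid, CRYPTO_SYMQ);
 */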
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup, err, s;

	s = splcrypto();
	cap = crypto_checkdriver(driverid);
	if (cap != NULL) {
		needwakeup = 0;
		if (what & CRYPTO_SYMQ) {
			needwakeup |= cap->cc_qblocked;
			cap->cc_qblocked = 0;
		}
		if (what & CRYPTO_ASYMQ) {
			needwakeup |= cap->cc_kqblocked;
			cap->cc_kqblocked = 0;
		}
		if (needwakeup)
			setsoftcrypto();
		err = 0;
	} else
		err = EINVAL;
	splx(s);

	return err;
}

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
	u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
	int s, result;

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif
	s = splcrypto();
	if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
		struct cryptocap *cap;
		/*
		 * Caller marked the request to be processed
		 * immediately; dispatch it directly to the
		 * driver unless the driver is currently blocked.
		 */
		cap = crypto_checkdriver(hid);
		if (cap && !cap->cc_qblocked) {
			result = crypto_invoke(crp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the op on the queue.
				 */
				crypto_drivers[hid].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, crp, crp_next);
				cryptostats.cs_blocks++;
				result = 0;
			}
		} else {
			/*
			 * The driver is blocked, just queue the op until
			 * it unblocks and the swi thread gets kicked.
			 */
			TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
			result = 0;
		}
	} else {
		int wasempty = TAILQ_EMPTY(&crp_q);
		/*
		 * Caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 */
		TAILQ_INSERT_TAIL(&crp_q, crp, crp_next);
		if (wasempty)
			setsoftcrypto();
		result = 0;
	}
	splx(s);

	return result;
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
int
crypto_kdispatch(struct cryptkop *krp)
{
	struct cryptocap *cap;
	int s, result;

	cryptostats.cs_kops++;

	s = splcrypto();
	cap = crypto_checkdriver(krp->krp_hid);
	if (cap && !cap->cc_kqblocked) {
		result = crypto_kinvoke(krp, 0);
		if (result == ERESTART) {
			/*
			 * The driver ran out of resources, mark the
			 * driver ``blocked'' for cryptkop's and put
			 * the op on the queue.
			 */
			crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
			TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
			cryptostats.cs_kblocks++;
			result = 0;	/* op was queued, not an error */
		}
	} else {
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
		result = 0;
	}
	splx(s);

	return result;
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	u_int32_t hid;
	int error;

	/* Sanity checks. */
	if (krp == NULL)
		return EINVAL;
	if (krp->krp_callback == NULL) {
		free(krp, M_XDATA);		/* XXX allocated in cryptodev */
		return EINVAL;
	}

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft)
			continue;
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		if ((crypto_drivers[hid].cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0)
			continue;
		break;
	}
	if (hid < crypto_drivers_num) {
		krp->krp_hid = hid;
		error = crypto_drivers[hid].cc_kprocess(
				crypto_drivers[hid].cc_karg, krp, hint);
	} else
		error = ENODEV;

	if (error) {
		krp->krp_status = error;
		crypto_kdone(krp);
	}
	return 0;
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	u_int32_t hid;
	int (*process)(void*, struct cryptop *, int);

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
	/* Sanity checks. */
	if (crp == NULL)
		return EINVAL;
	if (crp->crp_callback == NULL) {
		crypto_freereq(crp);
		return EINVAL;
	}
	if (crp->crp_desc == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return 0;
	}

	hid = CRYPTO_SESID2HID(crp->crp_sid);
	if (hid < crypto_drivers_num) {
		if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
			crypto_freesession(crp->crp_sid);
		process = crypto_drivers[hid].cc_process;
	} else {
		process = NULL;
	}

	if (process == NULL) {
		struct cryptodesc *crd;
		u_int64_t nid;

		/*
		 * Driver has unregistered; migrate the session and return
		 * an error to the caller so they'll resubmit the op.
		 */
		for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
			crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

		if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI), 0) == 0)
			crp->crp_sid = nid;

		crp->crp_etype = EAGAIN;
		crypto_done(crp);
		return 0;
	} else {
		/*
		 * Invoke the driver to process the request.
		 */
		return (*process)(crypto_drivers[hid].cc_arg, crp, hint);
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;
	int s;

	if (crp == NULL)
		return;

	/* NB: see below for an explanation */
	s = splcrypto();
	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		zfree(cryptodesc_zone, crd);
	}
	zfree(cryptop_zone, crp);
	splx(s);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int s;

	/*
	 * Must interlock access to the zone.  Calls may come in
	 * at raised ipl from network protocols, but in general
	 * we cannot be certain where we'll be called from.  We
	 * could use zalloci/zfreei which is safe to be called
	 * from anywhere or use splhigh, but for now splcrypto
	 * is safe as it blocks crypto drivers and network threads.
	 */
	s = splcrypto();
	crp = zalloc(cryptop_zone);
	if (crp != NULL) {
		bzero(crp, sizeof (*crp));
		while (num--) {
			crd = zalloc(cryptodesc_zone);
			if (crd == NULL) {
				crypto_freereq(crp);
				splx(s);
				return NULL;
			}

			bzero(crd, sizeof (*crd));
			crd->crd_next = crp->crp_desc;
			crp->crp_desc = crd;
		}
	}
	splx(s);
	return crp;
}
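
/*
 * Consumer-side sketch tying crypto_getreq and crypto_dispatch
 * together (hedged: sid, buf, buflen, and my_callback are hypothetical
 * placeholders; the field names are those of struct cryptop and
 * struct cryptodesc):
 *
 *	struct cryptop *crp = crypto_getreq(1);
 *	if (crp != NULL) {
 *		crp->crp_sid = sid;
 *		crp->crp_ilen = buflen;
 *		crp->crp_flags = CRYPTO_F_CBIMM;
 *		crp->crp_buf = buf;
 *		crp->crp_callback = my_callback;
 *		crp->crp_desc->crd_alg = CRYPTO_DES_CBC;
 *		... fill in the remaining descriptor fields ...
 *		error = crypto_dispatch(crp);
 *	}
 *
 * The callback is responsible for eventually releasing the request
 * with crypto_freereq().
 */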

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	KASSERT((crp->crp_flags & CRYPTO_F_DONE) == 0,
		("crypto_done: op already done, flags 0x%x", crp->crp_flags));
	crp->crp_flags |= CRYPTO_F_DONE;
	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	/*
	 * CBIMM means unconditionally do the callback immediately;
	 * CBIFSYNC means do the callback immediately only if the
	 * operation was done synchronously.  Both are used to avoid
	 * doing extraneous context switches; the latter is mostly
	 * used with the software crypto driver.
	 */
	if ((crp->crp_flags & CRYPTO_F_CBIMM) ||
	    ((crp->crp_flags & CRYPTO_F_CBIFSYNC) &&
	     (CRYPTO_SESID2CAPS(crp->crp_sid) & CRYPTOCAP_F_SYNC))) {
		/*
		 * Do the callback directly.  This is ok when the
		 * callback routine does very little (e.g. the
		 * /dev/crypto callback method just does a wakeup).
		 */
#ifdef CRYPTO_TIMING
		if (crypto_timing) {
			/*
			 * NB: We must copy the timestamp before
			 * doing the callback as the cryptop is
			 * likely to be reclaimed.
			 */
			struct timespec t = crp->crp_tstamp;
			crypto_tstat(&cryptostats.cs_cb, &t);
			crp->crp_callback(crp);
			crypto_tstat(&cryptostats.cs_finis, &t);
		} else
#endif
			crp->crp_callback(crp);
	} else {
		int s, wasempty;
		/*
		 * Normal case; queue the callback for the thread.
		 *
		 * The return queue is manipulated by the swi thread
		 * and, potentially, by crypto device drivers calling
		 * back to mark operations completed.  Thus we need
		 * to mask both while manipulating the return queue.
		 */
		s = splcrypto();
		wasempty = TAILQ_EMPTY(&crp_ret_q);
		TAILQ_INSERT_TAIL(&crp_ret_q, crp, crp_next);
		if (wasempty)
			wakeup_one(&crp_ret_q);
		splx(s);
	}
}

/*
 * Invoke the callback on behalf of the driver (key operations).
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int s, wasempty;

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;
	/*
	 * The return queue is manipulated by the swi thread
	 * and, potentially, by crypto device drivers calling
	 * back to mark operations completed.  Thus we need
	 * to mask both while manipulating the return queue.
	 */
	s = splcrypto();
	wasempty = TAILQ_EMPTY(&crp_ret_kq);
	TAILQ_INSERT_TAIL(&crp_ret_kq, krp, krp_next);
	if (wasempty)
		wakeup_one(&crp_ret_q);	/* NB: shared wait channel with crp_ret_q */
	splx(s);
}

int
crypto_getfeat(int *featp)
{
	int hid, kalg, feat = 0;
	int s = splcrypto();

	if (!crypto_userasymcrypto)
		goto out;

	for (hid = 0; hid < crypto_drivers_num; hid++) {
		if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    !crypto_devallowsoft) {
			continue;
		}
		if (crypto_drivers[hid].cc_kprocess == NULL)
			continue;
		for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((crypto_drivers[hid].cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;
	}
out:
	splx(s);
	*featp = feat;
	return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void)
{
	struct cryptop *crp, *submit;
	struct cryptkop *krp;
	struct cryptocap *cap;
	int result, hint, s;

	cryptostats.cs_intrs++;
	s = splcrypto();
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH(crp, &crp_q, crp_next) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				/* Op needs to be migrated, process it. */
				if (submit == NULL)
					submit = crp;
				break;
			}
			if (!cap->cc_qblocked) {
				if (submit != NULL) {
					/*
					 * We stop on finding another op,
					 * regardless of whether it's for the
					 * same driver or not.  We could keep
					 * searching the queue but it might be
					 * better to just use a per-driver
					 * queue instead.
					 */
					if (CRYPTO_SESID2HID(submit->crp_sid) == hid)
						hint = CRYPTO_HINT_MORE;
					break;
				} else {
					submit = crp;
					if ((submit->crp_flags & CRYPTO_F_BATCH) == 0)
						break;
					/* keep scanning in case more ops are queued */
				}
			}
		}
		if (submit != NULL) {
			TAILQ_REMOVE(&crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[CRYPTO_SESID2HID(submit->crp_sid)].cc_qblocked = 1;
				TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
				cryptostats.cs_blocks++;
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH(krp, &crp_kq, krp_next) {
			cap = crypto_checkdriver(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked)
				break;
		}
		if (krp != NULL) {
			TAILQ_REMOVE(&crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would
				 * be best to put the request back where we
				 * got it but that's hard so for now we put
				 * it at the front.  This should be ok;
				 * putting it at the end does not work.
				 */
				/* XXX validate sid again? */
				crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
				TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
				cryptostats.cs_kblocks++;
			}
		}
	} while (submit != NULL || krp != NULL);
	splx(s);
}

/*
 * Kernel thread to do callbacks.
 */
static void
cryptoret(void)
{
	struct cryptop *crp;
	struct cryptkop *krp;
	int s;

	s = splcrypto();
	for (;;) {
		crp = TAILQ_FIRST(&crp_ret_q);
		if (crp != NULL)
			TAILQ_REMOVE(&crp_ret_q, crp, crp_next);
		krp = TAILQ_FIRST(&crp_ret_kq);
		if (krp != NULL)
			TAILQ_REMOVE(&crp_ret_kq, krp, krp_next);

		if (crp != NULL || krp != NULL) {
			splx(s);		/* lower ipl for callbacks */
			if (crp != NULL) {
#ifdef CRYPTO_TIMING
				if (crypto_timing) {
					/*
					 * NB: We must copy the timestamp before
					 * doing the callback as the cryptop is
					 * likely to be reclaimed.
					 */
					struct timespec t = crp->crp_tstamp;
					crypto_tstat(&cryptostats.cs_cb, &t);
					crp->crp_callback(crp);
					crypto_tstat(&cryptostats.cs_finis, &t);
				} else
#endif
					crp->crp_callback(crp);
			}
			if (krp != NULL)
				krp->krp_callback(krp);
			s = splcrypto();
		} else {
			(void) tsleep(&crp_ret_q, PLOCK, "crypto_wait", 0);
			cryptostats.cs_rets++;
		}
	}
}