/*	$NetBSD: crypto.c,v 1.131 2022/06/26 22:52:30 riastradh Exp $ */
/*	$FreeBSD: src/sys/opencrypto/crypto.c,v 1.4.2.5 2003/02/26 00:14:05 sam Exp $	*/
/*	$OpenBSD: crypto.c,v 1.41 2002/07/17 23:52:38 art Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Coyote Point Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000.  Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY.  IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: crypto.c,v 1.131 2022/06/26 22:52:30 riastradh Exp $");

#include <sys/param.h>
#include <sys/reboot.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/pool.h>
#include <sys/kthread.h>
#include <sys/once.h>
#include <sys/sysctl.h>
#include <sys/intr.h>
#include <sys/errno.h>
#include <sys/module.h>
#include <sys/xcall.h>
#include <sys/device.h>
#include <sys/cpu.h>
#include <sys/percpu.h>
#include <sys/kmem.h>

#if defined(_KERNEL_OPT)
#include "opt_ocf.h"
#endif

#include <opencrypto/cryptodev.h>
#include <opencrypto/xform.h>		/* XXX for M_XDATA */

/*
 * Crypto drivers register themselves by allocating a slot in the
 * crypto_drivers table with crypto_get_driverid() and then registering
 * each algorithm they support with crypto_register() and crypto_kregister().
 */
/* Don't directly access crypto_drivers[i], use crypto_checkdriver(i). */
static struct {
	kmutex_t mtx;
	int num;
	struct cryptocap *list;
} crypto_drv __cacheline_aligned;
#define crypto_drv_mtx		(crypto_drv.mtx)
#define crypto_drivers_num	(crypto_drv.num)
#define crypto_drivers		(crypto_drv.list)

static void *crypto_q_si;
static void *crypto_ret_si;

/*
 * There are two queues for crypto requests; one for symmetric (e.g.
 * cipher) operations and one for asymmetric (e.g. MOD) operations.
 * See below for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_q, cryptop);
TAILQ_HEAD(crypto_crp_kq, cryptkop);
struct crypto_crp_qs {
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
};
static percpu_t *crypto_crp_qs_percpu;

static inline struct crypto_crp_qs *
crypto_get_crp_qs(int *s)
{

	KASSERT(s != NULL);

	*s = splsoftnet();
	return percpu_getref(crypto_crp_qs_percpu);
}

static inline void
crypto_put_crp_qs(int *s)
{

	KASSERT(s != NULL);

	percpu_putref(crypto_crp_qs_percpu);
	splx(*s);
}
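
/*
 * Illustrative usage of the get/put bracket above (hypothetical caller;
 * "crp" is assumed to be a fully initialized request).  The per-CPU
 * queues may only be touched between the two calls, which hold the
 * softnet spl and the percpu reference:
 *
 *	int s;
 *	struct crypto_crp_qs *qs = crypto_get_crp_qs(&s);
 *	TAILQ_INSERT_TAIL(qs->crp_q, crp, crp_next);
 *	crypto_put_crp_qs(&s);
 */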

static void
crypto_crp_q_is_busy_pc(void *p, void *arg, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs_pc = p;
	bool *isempty = arg;

	if (!TAILQ_EMPTY(qs_pc->crp_q) || !TAILQ_EMPTY(qs_pc->crp_kq))
		*isempty = true;
}

static void
crypto_crp_qs_init_pc(void *p, void *arg __unused, struct cpu_info *ci __unused)
{
	struct crypto_crp_qs *qs = p;

	qs->crp_q = kmem_alloc(sizeof(struct crypto_crp_q), KM_SLEEP);
	qs->crp_kq = kmem_alloc(sizeof(struct crypto_crp_kq), KM_SLEEP);

	TAILQ_INIT(qs->crp_q);
	TAILQ_INIT(qs->crp_kq);
}

/*
 * There are two queues for processing completed crypto requests; one
 * for the symmetric and one for the asymmetric ops.  We only need one
 * but have two to avoid type futzing (cryptop vs. cryptkop).  See below
 * for how synchronization is handled.
 */
TAILQ_HEAD(crypto_crp_ret_q, cryptop);
TAILQ_HEAD(crypto_crp_ret_kq, cryptkop);
struct crypto_crp_ret_qs {
	kmutex_t crp_ret_q_mtx;
	bool crp_ret_q_exit_flag;

	struct crypto_crp_ret_q crp_ret_q;
	int crp_ret_q_len;
	int crp_ret_q_maxlen;	/* queue length limit. <=0 means unlimited. */
	int crp_ret_q_drops;

	struct crypto_crp_ret_kq crp_ret_kq;
	int crp_ret_kq_len;
	int crp_ret_kq_maxlen;	/* queue length limit. <=0 means unlimited. */
	int crp_ret_kq_drops;
};
struct crypto_crp_ret_qs **crypto_crp_ret_qs_list;


static inline struct crypto_crp_ret_qs *
crypto_get_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_enter(&qs->crp_ret_q_mtx);
	return qs;
}

static inline void
crypto_put_crp_ret_qs(struct cpu_info *ci)
{
	u_int cpuid;
	struct crypto_crp_ret_qs *qs;

	KASSERT(ci != NULL);

	cpuid = cpu_index(ci);
	qs = crypto_crp_ret_qs_list[cpuid];
	mutex_exit(&qs->crp_ret_q_mtx);
}

#ifndef CRYPTO_RET_Q_MAXLEN
#define CRYPTO_RET_Q_MAXLEN 0
#endif
#ifndef CRYPTO_RET_KQ_MAXLEN
#define CRYPTO_RET_KQ_MAXLEN 0
#endif
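
/*
 * A sketch of overriding these defaults at build time, assuming the
 * options are wired up via opt_ocf.h (or equivalently passed through
 * CPPFLAGS); the values below are placeholders:
 *
 *	options CRYPTO_RET_Q_MAXLEN=1024
 *	options CRYPTO_RET_KQ_MAXLEN=1024
 */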

static int
sysctl_opencrypto_q_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_q_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_q_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_q_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_q_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_q_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_q_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_q_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

static int
sysctl_opencrypto_kq_len(SYSCTLFN_ARGS)
{
	int error, len = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		len += qs->crp_ret_kq_len;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &len;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_drops(SYSCTLFN_ARGS)
{
	int error, drops = 0;
	struct sysctlnode node = *rnode;

	for (int i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		drops += qs->crp_ret_kq_drops;
		crypto_put_crp_ret_qs(ci);
	}

	node.sysctl_data = &drops;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	return 0;
}

static int
sysctl_opencrypto_kq_maxlen(SYSCTLFN_ARGS)
{
	int error, maxlen;
	struct crypto_crp_ret_qs *qs;
	struct sysctlnode node = *rnode;

	/* Every CPU's crp_ret_kq_maxlen is the same. */
	qs = crypto_get_crp_ret_qs(curcpu());
	maxlen = qs->crp_ret_kq_maxlen;
	crypto_put_crp_ret_qs(curcpu());

	node.sysctl_data = &maxlen;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;

	for (int i = 0; i < ncpu; i++) {
		struct cpu_info *ci = cpu_lookup(i);

		qs = crypto_get_crp_ret_qs(ci);
		qs->crp_ret_kq_maxlen = maxlen;
		crypto_put_crp_ret_qs(ci);
	}

	return 0;
}

/*
 * Crypto op and descriptor data structures are allocated from separate
 * private zones (FreeBSD) / pools (NetBSD/OpenBSD).
 */
static pool_cache_t cryptop_cache;
static pool_cache_t cryptodesc_cache;
static pool_cache_t cryptkop_cache;

int	crypto_usercrypto = 1;		/* userland may open /dev/crypto */
int	crypto_userasymcrypto = 1;	/* userland may do asym crypto reqs */
/*
 * crypto_devallowsoft is sysctl'able (as "cryptodevallowsoft"),
 * controlling access to hardware versus software transforms as below:
 *
 * crypto_devallowsoft < 0:  Force userlevel requests to use software
 *                           transforms, always
 * crypto_devallowsoft = 0:  Use hardware if present, grant userlevel
 *                           requests for non-accelerated transforms
 *                           (handling the latter in software)
 * crypto_devallowsoft > 0:  Allow user requests only for transforms which
 *                           are hardware-accelerated.
 */
int	crypto_devallowsoft = 1;	/* only use hardware crypto */

static void
sysctl_opencrypto_setup(struct sysctllog **clog)
{
	const struct sysctlnode *ocnode;
	const struct sysctlnode *retqnode, *retkqnode;

	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "usercrypto",
	    SYSCTL_DESCR("Enable/disable user-mode access to "
		"crypto support"),
	    NULL, 0, &crypto_usercrypto, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "userasymcrypto",
	    SYSCTL_DESCR("Enable/disable user-mode access to "
		"asymmetric crypto support"),
	    NULL, 0, &crypto_userasymcrypto, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "cryptodevallowsoft",
	    SYSCTL_DESCR("Enable/disable use of software "
		"asymmetric crypto support"),
	    NULL, 0, &crypto_devallowsoft, 0,
	    CTL_KERN, CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, NULL, &ocnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "opencrypto",
	    SYSCTL_DESCR("opencrypto related entries"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);

	sysctl_createv(clog, 0, &ocnode, &retqnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "crypto_ret_q",
	    SYSCTL_DESCR("crypto_ret_q related entries"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "len",
	    SYSCTL_DESCR("Current queue length"),
	    sysctl_opencrypto_q_len, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "drops",
	    SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
	    sysctl_opencrypto_q_drops, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxlen",
	    SYSCTL_DESCR("Maximum allowed queue length"),
	    sysctl_opencrypto_q_maxlen, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);


	sysctl_createv(clog, 0, &ocnode, &retkqnode,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_NODE, "crypto_ret_kq",
	    SYSCTL_DESCR("crypto_ret_kq related entries"),
	    NULL, 0, NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "len",
	    SYSCTL_DESCR("Current queue length"),
	    sysctl_opencrypto_kq_len, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READONLY,
	    CTLTYPE_INT, "drops",
	    SYSCTL_DESCR("Crypto requests dropped due to full ret queue"),
	    sysctl_opencrypto_kq_drops, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
	sysctl_createv(clog, 0, &retkqnode, NULL,
	    CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
	    CTLTYPE_INT, "maxlen",
	    SYSCTL_DESCR("Maximum allowed queue length"),
	    sysctl_opencrypto_kq_maxlen, 0,
	    NULL, 0,
	    CTL_CREATE, CTL_EOL);
}
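
/*
 * Illustrative userland view of the knobs created above (the node
 * names follow from the hierarchy built here; the written values are
 * placeholders):
 *
 *	# sysctl kern.usercrypto kern.userasymcrypto
 *	# sysctl opencrypto.crypto_ret_q.len
 *	# sysctl -w opencrypto.crypto_ret_q.maxlen=1024
 *	# sysctl -w opencrypto.crypto_ret_kq.maxlen=1024
 */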

/*
 * Synchronization: read carefully, this is non-trivial.
 *
 * Crypto requests are submitted via crypto_dispatch.  Typically
 * these come in from network protocols at spl0 (output path) or
 * spl[,soft]net (input path).
 *
 * Requests are typically passed on to the driver directly, but they
 * may also be queued for processing by a software interrupt thread,
 * cryptointr, that runs at splsoftcrypto.  This thread dispatches
 * the requests to crypto drivers (h/w or s/w) which call crypto_done
 * when a request is complete.  Hardware crypto drivers are assumed
 * to register their IRQs as network devices so their interrupt handlers
 * and subsequent "done callbacks" happen at spl[imp,net].
 *
 * Completed crypto ops are queued for a separate kernel thread that
 * handles the callbacks at spl0.  This decoupling ensures the crypto
 * driver interrupt service routine is not delayed while the callback
 * takes place and that callbacks are delivered after a context switch
 * (as opposed to a software interrupt that clients must block).
 *
 * This scheme is not intended for SMP machines.
 */
static	void cryptointr(void *);	/* softint to dispatch ops */
static	void cryptoret_softint(void *);	/* softint to do callbacks */
static	int crypto_destroy(bool);
static	int crypto_invoke(struct cryptop *crp, int hint);
static	int crypto_kinvoke(struct cryptkop *krp, int hint);

static struct cryptocap *crypto_checkdriver_lock(u_int32_t);
static struct cryptocap *crypto_checkdriver_uninit(u_int32_t);
static struct cryptocap *crypto_checkdriver(u_int32_t);
static void crypto_driver_lock(struct cryptocap *);
static void crypto_driver_unlock(struct cryptocap *);
static void crypto_driver_clear(struct cryptocap *);

static int crypto_init_finalize(device_t);

static struct cryptostats cryptostats;
#ifdef CRYPTO_TIMING
static	int crypto_timing = 0;
#endif

static struct sysctllog *sysctl_opencrypto_clog;

static void
crypto_crp_ret_qs_init(void)
{
	int i;

	crypto_crp_ret_qs_list = kmem_alloc(sizeof(struct crypto_crp_ret_qs *) * ncpu,
	    KM_SLEEP);

	for (i = 0; i < ncpu; i++) {
		struct crypto_crp_ret_qs *qs;

		qs = kmem_alloc(sizeof(struct crypto_crp_ret_qs), KM_SLEEP);
		mutex_init(&qs->crp_ret_q_mtx, MUTEX_DEFAULT, IPL_NET);
		qs->crp_ret_q_exit_flag = false;

		TAILQ_INIT(&qs->crp_ret_q);
		qs->crp_ret_q_len = 0;
		qs->crp_ret_q_maxlen = CRYPTO_RET_Q_MAXLEN;
		qs->crp_ret_q_drops = 0;

		TAILQ_INIT(&qs->crp_ret_kq);
		qs->crp_ret_kq_len = 0;
		qs->crp_ret_kq_maxlen = CRYPTO_RET_KQ_MAXLEN;
		qs->crp_ret_kq_drops = 0;

		crypto_crp_ret_qs_list[i] = qs;
	}
}

static int
crypto_init0(void)
{

	mutex_init(&crypto_drv_mtx, MUTEX_DEFAULT, IPL_NONE);
	cryptop_cache = pool_cache_init(sizeof(struct cryptop),
	    coherency_unit, 0, 0, "cryptop", NULL, IPL_NET, NULL, NULL, NULL);
	cryptodesc_cache = pool_cache_init(sizeof(struct cryptodesc),
	    coherency_unit, 0, 0, "cryptdesc", NULL, IPL_NET, NULL, NULL, NULL);
	cryptkop_cache = pool_cache_init(sizeof(struct cryptkop),
	    coherency_unit, 0, 0, "cryptkop", NULL, IPL_NET, NULL, NULL, NULL);

	crypto_crp_qs_percpu = percpu_create(sizeof(struct crypto_crp_qs),
	    crypto_crp_qs_init_pc, /*XXX*/NULL, NULL);

	crypto_crp_ret_qs_init();

	crypto_drivers = kmem_zalloc(CRYPTO_DRIVERS_INITIAL *
	    sizeof(struct cryptocap), KM_SLEEP);
	crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;

	crypto_q_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE, cryptointr, NULL);
	if (crypto_q_si == NULL) {
		printf("crypto_init: cannot establish request queue handler\n");
		return crypto_destroy(false);
	}

	/*
	 * Some encryption devices (such as mvcesa) attach before
	 * ipi_sysinit().  Establishing crypto_ret_si that early would
	 * trip an assertion in ipi_register(), because the softint
	 * uses SOFTINT_RCPU, so defer it to config_finalize time.
	 */
	if (config_finalize_register(NULL, crypto_init_finalize) != 0) {
		printf("crypto_init: cannot register crypto_init_finalize\n");
		return crypto_destroy(false);
	}

	sysctl_opencrypto_setup(&sysctl_opencrypto_clog);

	return 0;
}

static int
crypto_init_finalize(device_t self __unused)
{

	crypto_ret_si = softint_establish(SOFTINT_NET|SOFTINT_MPSAFE|SOFTINT_RCPU,
	    &cryptoret_softint, NULL);
	KASSERT(crypto_ret_si != NULL);

	return 0;
}

int
crypto_init(void)
{
	static ONCE_DECL(crypto_init_once);

	return RUN_ONCE(&crypto_init_once, crypto_init0);
}

static int
crypto_destroy(bool exit_kthread)
{
	int i;

	if (exit_kthread) {
		struct cryptocap *cap = NULL;
		bool is_busy = false;

		/* if we have any in-progress requests, don't unload */
		percpu_foreach(crypto_crp_qs_percpu, crypto_crp_q_is_busy_pc,
		    &is_busy);
		if (is_busy)
			return EBUSY;
		/* FIXME:
		 * Prohibit enqueueing to crp_q and crp_kq after this point.
		 */

		mutex_enter(&crypto_drv_mtx);
		for (i = 0; i < crypto_drivers_num; i++) {
			cap = crypto_checkdriver(i);
			if (cap == NULL)
				continue;
			if (cap->cc_sessions != 0) {
				mutex_exit(&crypto_drv_mtx);
				return EBUSY;
			}
		}
		mutex_exit(&crypto_drv_mtx);
		/* FIXME:
		 * Prohibit touching crypto_drivers[] and its elements
		 * after this point.
		 */

		/* Ensure cryptoret_softint() is never scheduled again. */
		for (i = 0; i < ncpu; i++) {
			struct crypto_crp_ret_qs *qs;
			struct cpu_info *ci = cpu_lookup(i);

			qs = crypto_get_crp_ret_qs(ci);
			qs->crp_ret_q_exit_flag = true;
			crypto_put_crp_ret_qs(ci);
		}
	}

	if (sysctl_opencrypto_clog != NULL)
		sysctl_teardown(&sysctl_opencrypto_clog);

	if (crypto_ret_si != NULL)
		softint_disestablish(crypto_ret_si);

	if (crypto_q_si != NULL)
		softint_disestablish(crypto_q_si);

	mutex_enter(&crypto_drv_mtx);
	if (crypto_drivers != NULL)
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
	mutex_exit(&crypto_drv_mtx);

	percpu_free(crypto_crp_qs_percpu, sizeof(struct crypto_crp_qs));

	pool_cache_destroy(cryptop_cache);
	pool_cache_destroy(cryptodesc_cache);
	pool_cache_destroy(cryptkop_cache);

	mutex_destroy(&crypto_drv_mtx);

	return 0;
}

static bool
crypto_driver_suitable(struct cryptocap *cap, struct cryptoini *cri)
{
	struct cryptoini *cr;

	for (cr = cri; cr; cr = cr->cri_next)
		if (cap->cc_alg[cr->cri_alg] == 0) {
			DPRINTF("alg %d not supported\n", cr->cri_alg);
			return false;
		}

	return true;
}

#define CRYPTO_ACCEPT_HARDWARE 0x1
#define CRYPTO_ACCEPT_SOFTWARE 0x2
/*
 * The algorithm we use here is pretty stupid; just use the
 * first driver that supports all the algorithms we need.
 * If there are multiple drivers we choose the driver with
 * the fewest active sessions.  We prefer hardware-backed
 * drivers to software ones.
 *
 * XXX We need more smarts here (in real life too, but that's
 * XXX another story altogether).
 */
static struct cryptocap *
crypto_select_driver_lock(struct cryptoini *cri, int hard)
{
	u_int32_t hid;
	int accept;
	struct cryptocap *cap, *best;
	int error = 0;

	best = NULL;
	/*
	 * hard == 0 allows both hardware and software drivers.  Hardware
	 * drivers are preferred over software drivers, so search the
	 * hardware drivers first.
	 */
	if (hard >= 0)
		accept = CRYPTO_ACCEPT_HARDWARE;
	else
		accept = CRYPTO_ACCEPT_SOFTWARE;
again:
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		/*
		 * If it's not initialized or has remaining sessions
		 * referencing it, skip.
		 */
		if (cap->cc_newsession == NULL ||
		    (cap->cc_flags & CRYPTOCAP_F_CLEANUP)) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* Hardware required -- ignore software drivers. */
		if ((accept & CRYPTO_ACCEPT_SOFTWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE)) {
			crypto_driver_unlock(cap);
			continue;
		}
		/* Software required -- ignore hardware drivers. */
		if ((accept & CRYPTO_ACCEPT_HARDWARE) == 0
		    && (cap->cc_flags & CRYPTOCAP_F_SOFTWARE) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}

		/* See if all the algorithms are supported. */
		if (crypto_driver_suitable(cap, cri)) {
			if (best == NULL) {
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			} else if (cap->cc_sessions < best->cc_sessions) {
				crypto_driver_unlock(best);
				/* keep holding crypto_driver_lock(cap) */
				best = cap;
				continue;
			}
		}

		crypto_driver_unlock(cap);
	}
	if (best == NULL && hard == 0
	    && (accept & CRYPTO_ACCEPT_SOFTWARE) == 0) {
		accept = CRYPTO_ACCEPT_SOFTWARE;
		goto again;
	}

	if (best == NULL && hard == 0 && error == 0) {
		mutex_exit(&crypto_drv_mtx);
		error = module_autoload("swcrypto", MODULE_CLASS_DRIVER);
		mutex_enter(&crypto_drv_mtx);
		if (error == 0) {
			error = EINVAL;
			goto again;
		}
	}

	return best;
}

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri, int hard)
{
	struct cryptocap *cap;
	int err = EINVAL;

	/*
	 * On failure, leave *sid initialized to a sentinel value that
	 * crypto_freesession will ignore.  This is the same as what
	 * you get from zero-initialized memory -- some callers (I'm
	 * looking at you, netipsec!) have paths that lead from
	 * zero-initialized memory into crypto_freesession without any
	 * crypto_newsession.
	 */
	*sid = 0;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_select_driver_lock(cri, hard);
	if (cap != NULL) {
		u_int32_t hid, lid;

		hid = cap - crypto_drivers;
		KASSERT(hid < 0xffffff);
		/*
		 * Can't do everything in one session.
		 *
		 * XXX Fix this. We need to inject a "virtual" session layer right
		 * XXX about here.
		 */

		/* Call the driver initialization routine. */
		lid = hid;		/* Pass the driver ID. */
		crypto_driver_unlock(cap);
		err = cap->cc_newsession(cap->cc_arg, &lid, cri);
		crypto_driver_lock(cap);
		if (err == 0) {
			(*sid) = hid + 1;
			(*sid) <<= 32;
			(*sid) |= (lid & 0xffffffff);
			KASSERT(*sid != 0);
			cap->cc_sessions++;
		} else {
			DPRINTF("crypto_drivers[%d].cc_newsession() failed. error=%d\n",
			    hid, err);
		}
		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);

	return err;
}
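
/*
 * Worked example of the sid encoding above (illustrative): for driver
 * slot hid = 2 and driver-local session id lid = 7,
 *
 *	sid = ((u_int64_t)(2 + 1) << 32) | 7 = 0x0000000300000007
 *
 * Biasing the hid half by one guarantees that a valid sid is never 0,
 * which is what lets crypto_freesession() treat 0 as a sentinel;
 * CRYPTO_SESID2HID() undoes the bias to recover the driver index.
 */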

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
void
crypto_freesession(u_int64_t sid)
{
	struct cryptocap *cap;

	/*
	 * crypto_newsession never returns 0 as a sid (by virtue of
	 * never returning 0 as a hid, which is part of the sid).
	 * However, some callers assume that freeing zero is safe.
	 * Previously this relied on all drivers to agree that freeing
	 * invalid sids is a no-op, but that's a terrible API contract
	 * that we're getting rid of.
	 */
	if (sid == 0)
		return;

	/* Look up the driver from the hid half of the sid. */
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(sid));
	KASSERTMSG(cap != NULL, "sid=%"PRIx64, sid);

	KASSERT(cap->cc_sessions > 0);
	cap->cc_sessions--;

	/* Call the driver cleanup routine, if available. */
	if (cap->cc_freesession)
		cap->cc_freesession(cap->cc_arg, sid);

	/*
	 * If this was the last session of a driver marked as invalid,
	 * make the entry available for reuse.
	 */
	if ((cap->cc_flags & CRYPTOCAP_F_CLEANUP) && cap->cc_sessions == 0)
		crypto_driver_clear(cap);

	crypto_driver_unlock(cap);
}

static bool
crypto_checkdriver_initialized(const struct cryptocap *cap)
{

	return cap->cc_process != NULL ||
	    (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0 ||
	    cap->cc_sessions != 0;
}

/*
 * Return an unused driver id.  Used by drivers prior to registering
 * support for the algorithms they handle.
 */
int32_t
crypto_get_driverid(u_int32_t flags)
{
	struct cryptocap *newdrv;
	struct cryptocap *cap = NULL;
	int i;

	(void)crypto_init();		/* XXX oh, this is foul! */

	mutex_enter(&crypto_drv_mtx);
	for (i = 0; i < crypto_drivers_num; i++) {
		cap = crypto_checkdriver_uninit(i);
		if (cap == NULL || crypto_checkdriver_initialized(cap))
			continue;
		break;
	}

	/* Out of entries, allocate some more. */
	if (cap == NULL) {
		/* Be careful about wrap-around. */
		if (2 * crypto_drivers_num <= crypto_drivers_num) {
			mutex_exit(&crypto_drv_mtx);
			printf("crypto: driver count wraparound!\n");
			return -1;
		}

		newdrv = kmem_zalloc(2 * crypto_drivers_num *
		    sizeof(struct cryptocap), KM_SLEEP);
		memcpy(newdrv, crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));
		kmem_free(crypto_drivers,
		    crypto_drivers_num * sizeof(struct cryptocap));

		crypto_drivers_num *= 2;
		crypto_drivers = newdrv;

		cap = crypto_checkdriver_uninit(i);
		KASSERT(cap != NULL);
	}

	/* NB: state is zero'd on free */
	cap->cc_sessions = 1;	/* Mark */
	cap->cc_flags = flags;
	mutex_init(&cap->cc_lock, MUTEX_DEFAULT, IPL_NET);

	if (bootverbose)
		printf("crypto: assign driver %u, flags %u\n", i, flags);

	mutex_exit(&crypto_drv_mtx);

	return i;
}

static struct cryptocap *
crypto_checkdriver_lock(u_int32_t hid)
{
	struct cryptocap *cap;

	KASSERT(crypto_drivers != NULL);

	if (hid >= crypto_drivers_num)
		return NULL;

	cap = &crypto_drivers[hid];
	mutex_enter(&cap->cc_lock);
	return cap;
}

/*
 * Use crypto_checkdriver_uninit() instead of crypto_checkdriver() in the
 * following two situations:
 * - crypto_drivers[] may not be allocated
 * - crypto_drivers[hid] may not be initialized
 */
static struct cryptocap *
crypto_checkdriver_uninit(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL)
		return NULL;

	return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
}

/*
 * Unlike crypto_checkdriver_uninit(), this returns NULL for table
 * entries that exist but have not been initialized yet.  The caller
 * must hold crypto_drv_mtx.
 */
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{

	KASSERT(mutex_owned(&crypto_drv_mtx));

	if (crypto_drivers == NULL || hid >= crypto_drivers_num)
		return NULL;

	struct cryptocap *cap = &crypto_drivers[hid];
	return crypto_checkdriver_initialized(cap) ? cap : NULL;
}

static inline void
crypto_driver_lock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_enter(&cap->cc_lock);
}

static inline void
crypto_driver_unlock(struct cryptocap *cap)
{

	KASSERT(cap != NULL);

	mutex_exit(&cap->cc_lock);
}

static void
crypto_driver_clear(struct cryptocap *cap)
{

	if (cap == NULL)
		return;

	KASSERT(mutex_owned(&cap->cc_lock));

	cap->cc_sessions = 0;
	memset(&cap->cc_max_op_len, 0, sizeof(cap->cc_max_op_len));
	memset(&cap->cc_alg, 0, sizeof(cap->cc_alg));
	memset(&cap->cc_kalg, 0, sizeof(cap->cc_kalg));
	cap->cc_flags = 0;
	cap->cc_qblocked = 0;
	cap->cc_kqblocked = 0;

	cap->cc_arg = NULL;
	cap->cc_newsession = NULL;
	cap->cc_process = NULL;
	cap->cc_freesession = NULL;
	cap->cc_kprocess = NULL;
}

/*
 * Register support for a key-related algorithm.  This routine
 * is called once for each algorithm supported by a driver.
 */
int
crypto_kregister(u_int32_t driverid, int kalg, u_int32_t flags,
    int (*kprocess)(void *, struct cryptkop *, int),
    void *karg)
{
	struct cryptocap *cap;
	int err;

	mutex_enter(&crypto_drv_mtx);

	cap = crypto_checkdriver_lock(driverid);
	if (cap != NULL &&
	    (CRK_ALGORITHM_MIN <= kalg && kalg <= CRK_ALGORITHM_MAX)) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_kalg[kalg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		if (bootverbose) {
			printf("crypto: driver %u registers key alg %u "
			    "flags %u\n",
			    driverid, kalg, flags);
		}

		if (cap->cc_kprocess == NULL) {
			cap->cc_karg = karg;
			cap->cc_kprocess = kprocess;
		}
		err = 0;
	} else
		err = EINVAL;

	if (cap != NULL)
		crypto_driver_unlock(cap);
	mutex_exit(&crypto_drv_mtx);
	return err;
}

/*
 * Register support for a non-key-related algorithm.  This routine
 * is called once for each such algorithm supported by a driver.
 */
int
crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
    u_int32_t flags,
    int (*newses)(void *, u_int32_t*, struct cryptoini*),
    void (*freeses)(void *, u_int64_t),
    int (*process)(void *, struct cryptop *, int),
    void *arg)
{
	struct cryptocap *cap;
	int err;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	/* NB: algorithms are in the range [1..max] */
	if (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) {
		/*
		 * XXX Do some performance testing to determine placing.
		 * XXX We probably need an auxiliary data structure that
		 * XXX describes relative performances.
		 */

		cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
		cap->cc_max_op_len[alg] = maxoplen;
		if (bootverbose) {
			printf("crypto: driver %u registers alg %u "
			    "flags %u maxoplen %u\n",
			    driverid, alg, flags, maxoplen);
		}

		if (cap->cc_process == NULL) {
			cap->cc_arg = arg;
			cap->cc_newsession = newses;
			cap->cc_process = process;
			cap->cc_freesession = freeses;
			cap->cc_sessions = 0;		/* Unmark */
		}
		err = 0;
	} else
		err = EINVAL;

	crypto_driver_unlock(cap);

	return err;
}
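
/*
 * Illustrative registration sequence for a hypothetical driver "exdrv"
 * (the callback names and softc pointer are placeholders, not a real
 * driver):
 *
 *	int32_t cid = crypto_get_driverid(0);
 *	if (cid < 0)
 *		return ENXIO;
 *	crypto_register(cid, CRYPTO_AES_CBC, 0, 0,
 *	    exdrv_newsession, exdrv_freesession, exdrv_process, sc);
 *	crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0,
 *	    exdrv_newsession, exdrv_freesession, exdrv_process, sc);
 */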

static int
crypto_unregister_locked(struct cryptocap *cap, int alg, bool all)
{
	int i;
	u_int32_t ses;
	bool lastalg = true;

	KASSERT(cap != NULL);
	KASSERT(mutex_owned(&cap->cc_lock));

	if (alg < CRYPTO_ALGORITHM_MIN || CRYPTO_ALGORITHM_MAX < alg)
		return EINVAL;

	if (!all && cap->cc_alg[alg] == 0)
		return EINVAL;

	cap->cc_alg[alg] = 0;
	cap->cc_max_op_len[alg] = 0;

	if (all) {
		if (alg != CRYPTO_ALGORITHM_MAX)
			lastalg = false;
	} else {
		/* Was this the last algorithm ? */
		for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++)
			if (cap->cc_alg[i] != 0) {
				lastalg = false;
				break;
			}
	}
	if (lastalg) {
		ses = cap->cc_sessions;
		crypto_driver_clear(cap);
		if (ses != 0) {
			/*
			 * If there are pending sessions, just mark as invalid.
			 */
			cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
			cap->cc_sessions = ses;
		}
	}

	return 0;
}

/*
 * Unregister a crypto driver.  If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver has been unregistered and
 * reroute requests.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
	int err;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	err = crypto_unregister_locked(cap, alg, false);
	crypto_driver_unlock(cap);

	return err;
}

/*
 * Unregister all algorithms associated with a crypto driver.
 * If there are pending sessions using it, leave enough information
 * around so that subsequent calls using those sessions will
 * correctly detect the driver has been unregistered and reroute
 * requests.
 */
int
crypto_unregister_all(u_int32_t driverid)
{
	int err, i;
	struct cryptocap *cap;

	cap = crypto_checkdriver_lock(driverid);
	for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; i++) {
		err = crypto_unregister_locked(cap, i, true);
		if (err)
			break;
	}
	crypto_driver_unlock(cap);

	return err;
}

/*
 * Clear blockage on a driver.  The what parameter indicates whether
 * the driver is now ready for cryptop's and/or cryptokop's.
 */
int
crypto_unblock(u_int32_t driverid, int what)
{
	struct cryptocap *cap;
	int needwakeup = 0;

	cap = crypto_checkdriver_lock(driverid);
	if (cap == NULL)
		return EINVAL;

	if (what & CRYPTO_SYMQ) {
		needwakeup |= cap->cc_qblocked;
		cap->cc_qblocked = 0;
	}
	if (what & CRYPTO_ASYMQ) {
		needwakeup |= cap->cc_kqblocked;
		cap->cc_kqblocked = 0;
	}
	crypto_driver_unlock(cap);
	if (needwakeup) {
		kpreempt_disable();
		softint_schedule(crypto_q_si);
		kpreempt_enable();
	}

	return 0;
}
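
/*
 * Illustrative use by a hypothetical driver: after its process
 * callback returned ERESTART, the driver calls crypto_unblock() once
 * resources free up (e.g. from its interrupt handler):
 *
 *	if (sc->sc_needwakeup) {
 *		int what = sc->sc_needwakeup;	// CRYPTO_SYMQ|CRYPTO_ASYMQ
 *		sc->sc_needwakeup = 0;
 *		crypto_unblock(sc->sc_cid, what);
 *	}
 */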

/*
 * Dispatch a crypto request to a driver or queue
 * it, to be processed by the kernel thread.
 */
void
crypto_dispatch(struct cryptop *crp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;

	KASSERT(crp != NULL);
	KASSERT(crp->crp_callback != NULL);
	KASSERT(crp->crp_desc != NULL);
	KASSERT(crp->crp_buf != NULL);
	KASSERT(!cpu_intr_p());

	DPRINTF("crp %p, alg %d\n", crp, crp->crp_desc->crd_alg);

	cryptostats.cs_ops++;

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		nanouptime(&crp->crp_tstamp);
#endif

	if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
		int wasempty;
		/*
		 * The caller marked the request as ``ok to delay'';
		 * queue it for the swi thread.  This is desirable
		 * when the operation is low priority and/or suitable
		 * for batching.
		 *
		 * List order does not matter for batched jobs.
		 */
		crp_qs = crypto_get_crp_qs(&s);
		crp_q = crp_qs->crp_q;
		wasempty = TAILQ_EMPTY(crp_q);
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		crypto_put_crp_qs(&s);
		crp_q = NULL;
		if (wasempty) {
			kpreempt_disable();
			softint_schedule(crypto_q_si);
			kpreempt_enable();
		}
		return;
	}

	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() completes, this migration would not be
	 * required.
	 */
	if (cap == NULL) {
		/*
		 * The driver must have been detached, so this request
		 * will migrate to another driver in cryptointr() later.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		goto out;
	}

	if (cap->cc_qblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_q, crp, crp_next);
		goto out;
	}

	/*
	 * Caller marked the request to be processed
	 * immediately; dispatch it directly to the
	 * driver unless the driver is currently blocked.
	 */
	crypto_driver_unlock(cap);
	result = crypto_invoke(crp, 0);
	KASSERTMSG(result == 0 || result == ERESTART, "result=%d", result);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_qblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_q, crp, crp_next);
		cryptostats.cs_blocks++;
	}

out:
	crypto_put_crp_qs(&s);
}

/*
 * Add an asymmetric crypto request to a queue,
 * to be processed by the kernel thread.
 */
void
crypto_kdispatch(struct cryptkop *krp)
{
	int result, s;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_kq *crp_kq;

	KASSERT(krp != NULL);
	KASSERT(krp->krp_callback != NULL);
	KASSERT(!cpu_intr_p());

	cryptostats.cs_kops++;

	crp_qs = crypto_get_crp_qs(&s);
	crp_kq = crp_qs->crp_kq;
	cap = crypto_checkdriver_lock(krp->krp_hid);
	/*
	 * TODO:
	 * If we could ensure the driver stays valid until
	 * crypto_unregister() completes, this migration would not be
	 * required.
	 */
	if (cap == NULL) {
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		goto out;
	}

	if (cap->cc_kqblocked != 0) {
		crypto_driver_unlock(cap);
		/*
		 * The driver is blocked, just queue the op until
		 * it unblocks and the swi thread gets kicked.
		 */
		TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
		goto out;
	}

	crypto_driver_unlock(cap);
	result = crypto_kinvoke(krp, 0);
	KASSERTMSG(result == 0 || result == ERESTART, "result=%d", result);
	if (result == ERESTART) {
		/*
		 * The driver ran out of resources, mark the
		 * driver ``blocked'' for cryptkop's and put
		 * the op on the queue.
		 */
		crypto_driver_lock(cap);
		cap->cc_kqblocked = 1;
		crypto_driver_unlock(cap);
		TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
		cryptostats.cs_kblocks++;
	}

out:
	crypto_put_crp_qs(&s);
}

/*
 * Dispatch an asymmetric crypto request to the appropriate crypto devices.
 */
static int
crypto_kinvoke(struct cryptkop *krp, int hint)
{
	struct cryptocap *cap = NULL;
	u_int32_t hid;
	int error;

	KASSERT(krp != NULL);
	KASSERT(krp->krp_callback != NULL);
	KASSERT(!cpu_intr_p());

	mutex_enter(&crypto_drv_mtx);
	for (hid = 0; hid < crypto_drivers_num; hid++) {
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;
		crypto_driver_lock(cap);
		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		if (cap->cc_kprocess == NULL) {
			crypto_driver_unlock(cap);
			continue;
		}
		if ((cap->cc_kalg[krp->krp_op] &
		    CRYPTO_ALG_FLAG_SUPPORTED) == 0) {
			crypto_driver_unlock(cap);
			continue;
		}
		break;
	}
	mutex_exit(&crypto_drv_mtx);
	if (cap != NULL) {
		int (*process)(void *, struct cryptkop *, int);
		void *arg;

		process = cap->cc_kprocess;
		arg = cap->cc_karg;
		krp->krp_hid = hid;
		krp->reqcpu = curcpu();
		crypto_driver_unlock(cap);
		error = (*process)(arg, krp, hint);
		KASSERTMSG(error == 0 || error == ERESTART, "error=%d",
		    error);
		return error;
	} else {
		krp->krp_status = ENODEV;
		krp->reqcpu = curcpu();
		crypto_kdone(krp);
		return 0;
	}
}

#ifdef CRYPTO_TIMING
static void
crypto_tstat(struct cryptotstat *ts, struct timespec *tv)
{
	struct timespec now, t;

	nanouptime(&now);
	t.tv_sec = now.tv_sec - tv->tv_sec;
	t.tv_nsec = now.tv_nsec - tv->tv_nsec;
	if (t.tv_nsec < 0) {
		t.tv_sec--;
		t.tv_nsec += 1000000000;
	}
	timespecadd(&ts->acc, &t, &t);
	if (timespeccmp(&t, &ts->min, <))
		ts->min = t;
	if (timespeccmp(&t, &ts->max, >))
		ts->max = t;
	ts->count++;

	*tv = now;
}
#endif

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
static int
crypto_invoke(struct cryptop *crp, int hint)
{
	struct cryptocap *cap;
	int error;

	KASSERT(crp != NULL);
	KASSERT(crp->crp_callback != NULL);
	KASSERT(crp->crp_desc != NULL);
	KASSERT(!cpu_intr_p());

#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif

	cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(crp->crp_sid));
	if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
		int (*process)(void *, struct cryptop *, int);
		void *arg;

		process = cap->cc_process;
		arg = cap->cc_arg;
		crp->reqcpu = curcpu();

		/*
		 * Invoke the driver to process the request.
		 */
		DPRINTF("calling process for %p\n", crp);
		crypto_driver_unlock(cap);
		error = (*process)(arg, crp, hint);
		KASSERTMSG(error == 0 || error == ERESTART, "error=%d",
		    error);
		return error;
	} else {
		if (cap != NULL) {
			crypto_driver_unlock(cap);
			crypto_freesession(crp->crp_sid);
		}
		crp->crp_etype = ENODEV;
		crypto_done(crp);
		return 0;
	}
}

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
	struct cryptodesc *crd;

	if (crp == NULL)
		return;
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	/* sanity check */
	if (crp->crp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_freereq() freeing crp on RETQ\n");
	}

	while ((crd = crp->crp_desc) != NULL) {
		crp->crp_desc = crd->crd_next;
		pool_cache_put(cryptodesc_cache, crd);
	}
	pool_cache_put(cryptop_cache, crp);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	struct crypto_crp_ret_qs *qs;

	KASSERT(num > 0);

	/*
	 * If crp_ret_q is already full, fail the allocation here so
	 * that error callbacks cannot overflow crp_ret_q.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_q_maxlen > 0
	    && qs->crp_ret_q_len > qs->crp_ret_q_maxlen) {
		qs->crp_ret_q_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	crp = pool_cache_get(cryptop_cache, PR_NOWAIT);
	if (crp == NULL) {
		return NULL;
	}
	memset(crp, 0, sizeof(struct cryptop));

	while (num--) {
		crd = pool_cache_get(cryptodesc_cache, PR_NOWAIT);
		if (crd == NULL) {
			crypto_freereq(crp);
			return NULL;
		}

		memset(crd, 0, sizeof(struct cryptodesc));
		crd->crd_next = crp->crp_desc;
		crp->crp_desc = crd;
	}

	return crp;
}
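
/*
 * Illustrative client usage (hypothetical; sid comes from
 * crypto_newsession() and the field values are placeholders):
 * allocate a request with one descriptor, fill it in, and dispatch it;
 * the callback later runs from cryptoret_softint().
 *
 *	struct cryptop *crp = crypto_getreq(1);
 *	if (crp == NULL)
 *		return ENOMEM;
 *	crp->crp_sid = sid;
 *	crp->crp_ilen = len;
 *	crp->crp_buf = (void *)m;	// e.g. an mbuf chain
 *	crp->crp_flags = CRYPTO_F_IMBUF;
 *	crp->crp_callback = mydone;	// int mydone(struct cryptop *)
 *	crp->crp_desc->crd_alg = CRYPTO_AES_CBC;
 *	crp->crp_desc->crd_skip = 0;
 *	crp->crp_desc->crd_len = len;
 *	crypto_dispatch(crp);
 */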

/*
 * Release a set of asymmetric crypto descriptors.
 * Currently only one descriptor is supported.
 */
void
crypto_kfreereq(struct cryptkop *krp)
{

	if (krp == NULL)
		return;

	DPRINTF("krp %p\n", krp);

	/* sanity check */
	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
		panic("crypto_kfreereq() freeing krp on RETQ\n");
	}

	pool_cache_put(cryptkop_cache, krp);
}

/*
 * Acquire a set of asymmetric crypto descriptors.
 * Currently only one descriptor is supported.
 */
struct cryptkop *
crypto_kgetreq(int num __diagused, int prflags)
{
	struct cryptkop *krp;
	struct crypto_crp_ret_qs *qs;

	KASSERTMSG(num == 1, "num=%d not supported", num);

	/*
	 * If crp_ret_kq is already full, fail the allocation here so
	 * that error callbacks cannot overflow crp_ret_kq.
	 */
	qs = crypto_get_crp_ret_qs(curcpu());
	if (qs->crp_ret_kq_maxlen > 0
	    && qs->crp_ret_kq_len > qs->crp_ret_kq_maxlen) {
		qs->crp_ret_kq_drops++;
		crypto_put_crp_ret_qs(curcpu());
		return NULL;
	}
	crypto_put_crp_ret_qs(curcpu());

	krp = pool_cache_get(cryptkop_cache, prflags);
	if (krp == NULL) {
		return NULL;
	}
	memset(krp, 0, sizeof(struct cryptkop));

	return krp;
}
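
/*
 * Illustrative asymmetric request (hypothetical values): allocate a
 * cryptkop, describe a modular exponentiation, and queue it with
 * crypto_kdispatch(); mykdone is a placeholder callback.
 *
 *	struct cryptkop *krp = crypto_kgetreq(1, PR_NOWAIT);
 *	if (krp == NULL)
 *		return ENOMEM;
 *	krp->krp_op = CRK_MOD_EXP;
 *	krp->krp_iparams = 3;		// base, exponent, modulus
 *	krp->krp_oparams = 1;		// result
 *	// ... fill krp->krp_param[] with crparam entries ...
 *	krp->krp_callback = mykdone;
 *	crypto_kdispatch(krp);
 */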

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
	int wasempty;
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_q *crp_ret_q;

	KASSERT(crp != NULL);

	if (crp->crp_etype != 0)
		cryptostats.cs_errs++;
#ifdef CRYPTO_TIMING
	if (crypto_timing)
		crypto_tstat(&cryptostats.cs_done, &crp->crp_tstamp);
#endif
	DPRINTF("lid[%u]: crp %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);

	qs = crypto_get_crp_ret_qs(crp->reqcpu);
	crp_ret_q = &qs->crp_ret_q;
	wasempty = TAILQ_EMPTY(crp_ret_q);
	DPRINTF("lid[%u]: queueing %p\n", CRYPTO_SESID2LID(crp->crp_sid), crp);
	crp->crp_flags |= CRYPTO_F_ONRETQ;
	TAILQ_INSERT_TAIL(crp_ret_q, crp, crp_next);
	qs->crp_ret_q_len++;
	if (wasempty && !qs->crp_ret_q_exit_flag) {
		DPRINTF("lid[%u]: waking cryptoret, crp %p hit empty queue.\n",
		    CRYPTO_SESID2LID(crp->crp_sid), crp);
		softint_schedule_cpu(crypto_ret_si, crp->reqcpu);
	}
	crypto_put_crp_ret_qs(crp->reqcpu);
}
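
/*
 * Illustrative driver completion path (hypothetical): when the
 * hardware finishes an operation, the driver records the error status
 * in crp_etype and hands the request back, which queues it for the
 * per-CPU callback softint established in crypto_init_finalize():
 *
 *	crp->crp_etype = error;		// 0 on success
 *	crypto_done(crp);
 */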

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_kdone(struct cryptkop *krp)
{
	int wasempty;
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_kq *crp_ret_kq;

	KASSERT(krp != NULL);

	if (krp->krp_status != 0)
		cryptostats.cs_kerrs++;

	qs = crypto_get_crp_ret_qs(krp->reqcpu);
	crp_ret_kq = &qs->crp_ret_kq;

	wasempty = TAILQ_EMPTY(crp_ret_kq);
	krp->krp_flags |= CRYPTO_F_ONRETQ;
	TAILQ_INSERT_TAIL(crp_ret_kq, krp, krp_next);
	qs->crp_ret_kq_len++;
	if (wasempty && !qs->crp_ret_q_exit_flag)
		softint_schedule_cpu(crypto_ret_si, krp->reqcpu);
	crypto_put_crp_ret_qs(krp->reqcpu);
}

int
crypto_getfeat(int *featp)
{

	if (crypto_userasymcrypto == 0) {
		*featp = 0;
		return 0;
	}

	mutex_enter(&crypto_drv_mtx);

	int feat = 0;
	for (int hid = 0; hid < crypto_drivers_num; hid++) {
		struct cryptocap *cap;
		cap = crypto_checkdriver(hid);
		if (cap == NULL)
			continue;

		crypto_driver_lock(cap);

		if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
		    crypto_devallowsoft == 0)
			goto unlock;

		if (cap->cc_kprocess == NULL)
			goto unlock;

		for (int kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
			if ((cap->cc_kalg[kalg] &
			    CRYPTO_ALG_FLAG_SUPPORTED) != 0)
				feat |= 1 << kalg;

unlock:		crypto_driver_unlock(cap);
	}

	mutex_exit(&crypto_drv_mtx);
	*featp = feat;
	return (0);
}

/*
 * Software interrupt thread to dispatch crypto requests.
 */
static void
cryptointr(void *arg __unused)
{
	struct cryptop *crp, *submit, *cnext;
	struct cryptkop *krp, *knext;
	struct cryptocap *cap;
	struct crypto_crp_qs *crp_qs;
	struct crypto_crp_q *crp_q;
	struct crypto_crp_kq *crp_kq;
	int result, hint, s;

	cryptostats.cs_intrs++;
	crp_qs = crypto_get_crp_qs(&s);
	crp_q = crp_qs->crp_q;
	crp_kq = crp_qs->crp_kq;
	do {
		/*
		 * Find the first element in the queue that can be
		 * processed and look-ahead to see if multiple ops
		 * are ready for the same driver.
		 */
		submit = NULL;
		hint = 0;
		TAILQ_FOREACH_SAFE(crp, crp_q, crp_next, cnext) {
			u_int32_t hid = CRYPTO_SESID2HID(crp->crp_sid);
			cap = crypto_checkdriver_lock(hid);
			if (cap == NULL || cap->cc_process == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				submit = crp;
				break;
			}

			/*
			 * skip blocked crp regardless of CRYPTO_F_BATCH
			 */
			if (cap->cc_qblocked != 0) {
				crypto_driver_unlock(cap);
				continue;
			}
			crypto_driver_unlock(cap);

			/*
			 * skip batch crp until the end of crp_q
			 */
			if ((crp->crp_flags & CRYPTO_F_BATCH) != 0) {
				if (submit == NULL) {
					submit = crp;
				} else {
					if (CRYPTO_SESID2HID(submit->crp_sid)
					    == hid)
						hint = CRYPTO_HINT_MORE;
				}

				continue;
			}

			/*
			 * Found the first crp that is neither blocked
			 * nor batched.
			 */
			submit = crp;
			/*
			 * batch crp can be processed much later, so clear hint.
			 */
			hint = 0;
			break;
		}
		if (submit != NULL) {
			TAILQ_REMOVE(crp_q, submit, crp_next);
			result = crypto_invoke(submit, hint);
			KASSERTMSG(result == 0 || result == ERESTART,
			    "result=%d", result);
			/* we must take here as the TAILQ op or kinvoke
			   may need this mutex below.  sigh. */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptop's and put
				 * the request back in the queue.  It would be
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(CRYPTO_SESID2HID(submit->crp_sid));
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_q, submit, crp_next);
				} else {
					cap->cc_qblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_q, submit, crp_next);
					cryptostats.cs_blocks++;
				}
			}
		}

		/* As above, but for key ops */
		TAILQ_FOREACH_SAFE(krp, crp_kq, krp_next, knext) {
			cap = crypto_checkdriver_lock(krp->krp_hid);
			if (cap == NULL || cap->cc_kprocess == NULL) {
				if (cap != NULL)
					crypto_driver_unlock(cap);
				/* Op needs to be migrated, process it. */
				break;
			}
			if (!cap->cc_kqblocked) {
				crypto_driver_unlock(cap);
				break;
			}
			crypto_driver_unlock(cap);
		}
		if (krp != NULL) {
			TAILQ_REMOVE(crp_kq, krp, krp_next);
			result = crypto_kinvoke(krp, 0);
			KASSERTMSG(result == 0 || result == ERESTART,
			    "result=%d", result);
			/* the next iteration will want the mutex. :-/ */
			if (result == ERESTART) {
				/*
				 * The driver ran out of resources, mark the
				 * driver ``blocked'' for cryptkop's and put
				 * the request back in the queue.  It would be
				 * best to put the request back where we got
				 * it but that's hard so for now we put it
				 * at the front.  This should be ok; putting
				 * it at the end does not work.
				 */
				/* validate sid again */
				cap = crypto_checkdriver_lock(krp->krp_hid);
				if (cap == NULL) {
					/* migrate again, sigh... */
					TAILQ_INSERT_TAIL(crp_kq, krp, krp_next);
				} else {
					cap->cc_kqblocked = 1;
					crypto_driver_unlock(cap);
					TAILQ_INSERT_HEAD(crp_kq, krp, krp_next);
					cryptostats.cs_kblocks++;
				}
			}
		}
	} while (submit != NULL || krp != NULL);
	crypto_put_crp_qs(&s);
}

/*
 * softint handler to do callbacks.
 */
static void
cryptoret_softint(void *arg __unused)
{
	struct crypto_crp_ret_qs *qs;
	struct crypto_crp_ret_q *crp_ret_q;
	struct crypto_crp_ret_kq *crp_ret_kq;

	qs = crypto_get_crp_ret_qs(curcpu());
	crp_ret_q = &qs->crp_ret_q;
	crp_ret_kq = &qs->crp_ret_kq;
	for (;;) {
		struct cryptop *crp;
		struct cryptkop *krp;

		crp = TAILQ_FIRST(crp_ret_q);
		if (crp != NULL) {
			TAILQ_REMOVE(crp_ret_q, crp, crp_next);
			qs->crp_ret_q_len--;
			crp->crp_flags &= ~CRYPTO_F_ONRETQ;
		}
		krp = TAILQ_FIRST(crp_ret_kq);
		if (krp != NULL) {
			TAILQ_REMOVE(crp_ret_kq, krp, krp_next);
			qs->crp_ret_kq_len--;
			krp->krp_flags &= ~CRYPTO_F_ONRETQ;
		}

		/* drop before calling any callbacks. */
		if (crp == NULL && krp == NULL)
			break;

		mutex_spin_exit(&qs->crp_ret_q_mtx);
		if (crp != NULL) {
#ifdef CRYPTO_TIMING
			if (crypto_timing) {
				/*
				 * NB: We must copy the timestamp before
				 * doing the callback as the cryptop is
				 * likely to be reclaimed.
				 */
				struct timespec t = crp->crp_tstamp;
				crypto_tstat(&cryptostats.cs_cb, &t);
				crp->crp_callback(crp);
				crypto_tstat(&cryptostats.cs_finis, &t);
			} else
#endif
			{
				crp->crp_callback(crp);
			}
		}
		if (krp != NULL)
			krp->krp_callback(krp);

		mutex_spin_enter(&qs->crp_ret_q_mtx);
	}
	crypto_put_crp_ret_qs(curcpu());
}

/* NetBSD module interface */

MODULE(MODULE_CLASS_MISC, opencrypto, NULL);

static int
opencrypto_modcmd(modcmd_t cmd, void *opaque)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		error = crypto_init();
#endif
		break;
	case MODULE_CMD_FINI:
#ifdef _MODULE
		error = crypto_destroy(true);
#endif
		break;
	default:
		error = ENOTTY;
	}
	return error;
}