1 /* $NetBSD: cryptodev.c,v 1.44.8.4 2010/02/14 13:36:57 bouyer Exp $ */
2 /* $FreeBSD: src/sys/opencrypto/cryptodev.c,v 1.4.2.4 2003/06/03 00:09:02 sam Exp $ */
3 /* $OpenBSD: cryptodev.c,v 1.53 2002/07/10 22:21:30 mickey Exp $ */
4
5 /*-
6 * Copyright (c) 2008 The NetBSD Foundation, Inc.
7 * All rights reserved.
8 *
9 * This code is derived from software contributed to The NetBSD Foundation
10 * by Coyote Point Systems, Inc.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice, this list of conditions and the following disclaimer.
17 * 2. Redistributions in binary form must reproduce the above copyright
18 * notice, this list of conditions and the following disclaimer in the
19 * documentation and/or other materials provided with the distribution.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
23 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
24 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
25 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 /*
35 * Copyright (c) 2001 Theo de Raadt
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 *
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. The name of the author may not be used to endorse or promote products
47 * derived from this software without specific prior written permission.
48 *
49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 *
60 * Effort sponsored in part by the Defense Advanced Research Projects
61 * Agency (DARPA) and Air Force Research Laboratory, Air Force
62 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
63 *
64 */
65
66 #include <sys/cdefs.h>
67 __KERNEL_RCSID(0, "$NetBSD: cryptodev.c,v 1.44.8.4 2010/02/14 13:36:57 bouyer Exp $");
68
69 #include <sys/param.h>
70 #include <sys/systm.h>
71 #include <sys/kmem.h>
72 #include <sys/malloc.h>
73 #include <sys/mbuf.h>
74 #include <sys/pool.h>
75 #include <sys/sysctl.h>
76 #include <sys/file.h>
77 #include <sys/filedesc.h>
78 #include <sys/errno.h>
79 #include <sys/md5.h>
80 #include <sys/sha1.h>
81 #include <sys/conf.h>
82 #include <sys/device.h>
83 #include <sys/kauth.h>
84 #include <sys/select.h>
85 #include <sys/poll.h>
86 #include <sys/atomic.h>
87
88 #include "opt_ocf.h"
89 #include <opencrypto/cryptodev.h>
90 #include <opencrypto/ocryptodev.h>
91 #include <opencrypto/xform.h>
92
/*
 * Per-session state for one crypto session opened on a cloned
 * /dev/crypto descriptor: selected transforms, kernel copies of the
 * key material, and scratch buffers reused by cryptodev_op().
 */
struct csession {
	TAILQ_ENTRY(csession) next;	/* linkage on fcrypt::csessions */
	u_int64_t	sid;		/* opencrypto driver session id */
	u_int32_t	ses;		/* per-fd handle handed to userland */

	u_int32_t	cipher;		/* note: shares name space in crd_alg */
	struct enc_xform *txform;	/* cipher transform, NULL if none */
	u_int32_t	mac;		/* note: shares name space in crd_alg */
	struct auth_hash *thash;	/* hash/MAC transform, NULL if none */
	u_int32_t	comp_alg;	/* note: shares name space in crd_alg */
	struct comp_algo *tcomp;	/* compression transform, NULL if none */

	void *		key;		/* cipher key (kernel copy) */
	int		keylen;		/* cipher key length in bytes */
	u_char		tmp_iv[EALG_MAX_BLOCK_LEN];	/* IV staging buffer */

	void *		mackey;		/* MAC key (kernel copy) */
	int		mackeylen;	/* MAC key length in bytes */
	u_char		tmp_mac[CRYPTO_MAX_MAC_LEN];	/* MAC staging buffer */

	struct iovec	iovec[1];	/* user requests never have more */
	struct uio	uio;		/* scratch uio built per request */
	int		error;		/* completion status set by callback */
};
117
/*
 * Per-open-descriptor state for a cloned /dev/crypto fd: the list of
 * sessions created on it plus the return queues for asynchronous
 * (batched) symmetric and key operations.
 */
struct fcrypt {
	TAILQ_HEAD(csessionlist, csession) csessions;	/* open sessions */
	TAILQ_HEAD(crprethead, cryptop) crp_ret_mq;	/* completed sym ops */
	TAILQ_HEAD(krprethead, cryptkop) crp_ret_mkq;	/* completed key ops */
	int		sesn;		/* next session handle to hand out */
	struct selinfo	sinfo;		/* select/poll bookkeeping */
	u_int32_t	requestid;	/* next request id for batched ops */
};
126
/* For our fixed-size allocations */
static struct pool fcrpl;	/* pool of struct fcrypt */
static struct pool csepl;	/* pool of struct csession */

/* Declaration of master device (fd-cloning/ctxt-allocating) entrypoints */
static int	cryptoopen(dev_t dev, int flag, int mode, struct lwp *l);
static int	cryptoread(dev_t dev, struct uio *uio, int ioflag);
static int	cryptowrite(dev_t dev, struct uio *uio, int ioflag);
static int	cryptoselect(dev_t dev, int rw, struct lwp *l);

/* Declaration of cloned-device (per-ctxt) entrypoints */
static int	cryptof_read(struct file *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	cryptof_write(struct file *, off_t *, struct uio *,
    kauth_cred_t, int);
static int	cryptof_ioctl(struct file *, u_long, void *);
static int	cryptof_close(struct file *);
static int	cryptof_poll(struct file *, int);
145
/*
 * File operations for the cloned per-process descriptor.  All real
 * work goes through ioctl; read/write report EIO via the stubs above.
 */
static const struct fileops cryptofops = {
	.fo_read = cryptof_read,
	.fo_write = cryptof_write,
	.fo_ioctl = cryptof_ioctl,
	.fo_fcntl = fnullop_fcntl,
	.fo_poll = cryptof_poll,
	.fo_stat = fbadop_stat,
	.fo_close = cryptof_close,
	.fo_kqfilter = fnullop_kqfilter,
	.fo_drain = fnullop_drain,
};
157
/* Session bookkeeping; the cse* helpers require crypto_mtx to be held. */
struct csession *cryptodev_csefind(struct fcrypt *, u_int);
static struct csession *csefind(struct fcrypt *, u_int);
static int	csedelete(struct fcrypt *, struct csession *);
static struct csession *cseadd(struct fcrypt *, struct csession *);
static struct csession *csecreate(struct fcrypt *, u_int64_t, void *,
    u_int64_t, void *, u_int64_t, u_int32_t, u_int32_t, u_int32_t,
    struct enc_xform *, struct auth_hash *, struct comp_algo *);
static int	csefree(struct csession *);

/* Asymmetric-key operation entry points (single and batched). */
static int	cryptodev_key(struct crypt_kop *);
static int	cryptodev_mkey(struct fcrypt *, struct crypt_n_kop *, int);
static int	cryptodev_msessionfin(struct fcrypt *, int, u_int32_t *);

/* Completion callbacks invoked by the opencrypto framework. */
static int	cryptodev_cb(void *);
static int	cryptodevkey_cb(void *);

static int	cryptodev_mcb(void *);
static int	cryptodevkey_mcb(void *);

/* Result retrieval for batched operations. */
static int	cryptodev_getmstatus(struct fcrypt *, struct crypt_result *,
    int);
static int	cryptodev_getstatus(struct fcrypt *, struct crypt_result *);

extern int	ocryptof_ioctl(struct file *, u_long, void *);
182
183 /*
184 * sysctl-able control variables for /dev/crypto now defined in crypto.c:
185 * crypto_usercrypto, crypto_userasmcrypto, crypto_devallowsoft.
186 */
187
/* ARGSUSED */
/* Reading a cloned /dev/crypto descriptor is not supported. */
int
cryptof_read(file_t *fp, off_t *poff,
    struct uio *uio, kauth_cred_t cred, int flags)
{
	return EIO;
}
195
/* ARGSUSED */
/* Writing a cloned /dev/crypto descriptor is not supported. */
int
cryptof_write(file_t *fp, off_t *poff,
    struct uio *uio, kauth_cred_t cred, int flags)
{
	return EIO;
}
203
204 /* ARGSUSED */
205 int
206 cryptof_ioctl(struct file *fp, u_long cmd, void *data)
207 {
208 struct fcrypt *fcr = fp->f_data;
209 struct csession *cse;
210 struct session_op *sop;
211 struct session_n_op *snop;
212 struct crypt_op *cop;
213 struct crypt_mop *mop;
214 struct crypt_mkop *mkop;
215 struct crypt_n_op *cnop;
216 struct crypt_n_kop *knop;
217 struct crypt_sgop *sgop;
218 struct crypt_sfop *sfop;
219 struct cryptret *crypt_ret;
220 struct crypt_result *crypt_res;
221 u_int32_t ses;
222 u_int32_t *sesid;
223 int error = 0;
224 size_t count;
225
226 /* backwards compatibility */
227 file_t *criofp;
228 struct fcrypt *criofcr;
229 int criofd;
230
231 switch (cmd) {
232 case CRIOGET: /* XXX deprecated, remove after 5.0 */
233 if ((error = fd_allocfile(&criofp, &criofd)) != 0)
234 return error;
235 criofcr = pool_get(&fcrpl, PR_WAITOK);
236 mutex_spin_enter(&crypto_mtx);
237 TAILQ_INIT(&criofcr->csessions);
238 TAILQ_INIT(&criofcr->crp_ret_mq);
239 TAILQ_INIT(&criofcr->crp_ret_mkq);
240 selinit(&criofcr->sinfo);
241
242 /*
243 * Don't ever return session 0, to allow detection of
244 * failed creation attempts with multi-create ioctl.
245 */
246 criofcr->sesn = 1;
247 criofcr->requestid = 1;
248 mutex_spin_exit(&crypto_mtx);
249 (void)fd_clone(criofp, criofd, (FREAD|FWRITE),
250 &cryptofops, criofcr);
251 *(u_int32_t *)data = criofd;
252 return error;
253 break;
254 case CIOCGSESSION:
255 sop = (struct session_op *)data;
256 error = cryptodev_session(fcr, sop);
257 break;
258 case CIOCNGSESSION:
259 sgop = (struct crypt_sgop *)data;
260 snop = kmem_alloc((sgop->count *
261 sizeof(struct session_n_op)), KM_SLEEP);
262 error = copyin(sgop->sessions, snop, sgop->count *
263 sizeof(struct session_n_op));
264 if (error) {
265 goto mbail;
266 }
267
268 error = cryptodev_msession(fcr, snop, sgop->count);
269 if (error) {
270 goto mbail;
271 }
272
273 error = copyout(snop, sgop->sessions, sgop->count *
274 sizeof(struct session_n_op));
275 mbail:
276 kmem_free(snop, sgop->count * sizeof(struct session_n_op));
277 break;
278 case CIOCFSESSION:
279 mutex_spin_enter(&crypto_mtx);
280 ses = *(u_int32_t *)data;
281 cse = csefind(fcr, ses);
282 if (cse == NULL)
283 return EINVAL;
284 csedelete(fcr, cse);
285 error = csefree(cse);
286 mutex_spin_exit(&crypto_mtx);
287 break;
288 case CIOCNFSESSION:
289 sfop = (struct crypt_sfop *)data;
290 sesid = kmem_alloc((sfop->count * sizeof(u_int32_t)),
291 KM_SLEEP);
292 error = copyin(sfop->sesid, sesid,
293 (sfop->count * sizeof(u_int32_t)));
294 if (!error) {
295 error = cryptodev_msessionfin(fcr, sfop->count, sesid);
296 }
297 kmem_free(sesid, (sfop->count * sizeof(u_int32_t)));
298 break;
299 case CIOCCRYPT:
300 mutex_spin_enter(&crypto_mtx);
301 cop = (struct crypt_op *)data;
302 cse = csefind(fcr, cop->ses);
303 mutex_spin_exit(&crypto_mtx);
304 if (cse == NULL) {
305 DPRINTF(("csefind failed\n"));
306 return EINVAL;
307 }
308 error = cryptodev_op(cse, cop, curlwp);
309 DPRINTF(("cryptodev_op error = %d\n", error));
310 break;
311 case CIOCNCRYPTM:
312 mop = (struct crypt_mop *)data;
313 cnop = kmem_alloc((mop->count * sizeof(struct crypt_n_op)),
314 KM_SLEEP);
315 error = copyin(mop->reqs, cnop,
316 (mop->count * sizeof(struct crypt_n_op)));
317 if(!error) {
318 error = cryptodev_mop(fcr, cnop, mop->count, curlwp);
319 if (!error) {
320 error = copyout(cnop, mop->reqs,
321 (mop->count * sizeof(struct crypt_n_op)));
322 }
323 }
324 kmem_free(cnop, (mop->count * sizeof(struct crypt_n_op)));
325 break;
326 case CIOCKEY:
327 error = cryptodev_key((struct crypt_kop *)data);
328 DPRINTF(("cryptodev_key error = %d\n", error));
329 break;
330 case CIOCNFKEYM:
331 mkop = (struct crypt_mkop *)data;
332 knop = kmem_alloc((mkop->count * sizeof(struct crypt_n_kop)),
333 KM_SLEEP);
334 error = copyin(mkop->reqs, knop,
335 (mkop->count * sizeof(struct crypt_n_kop)));
336 if (!error) {
337 error = cryptodev_mkey(fcr, knop, mkop->count);
338 if (!error)
339 error = copyout(knop, mkop->reqs,
340 (mkop->count * sizeof(struct crypt_n_kop)));
341 }
342 kmem_free(knop, (mkop->count * sizeof(struct crypt_n_kop)));
343 break;
344 case CIOCASYMFEAT:
345 error = crypto_getfeat((int *)data);
346 break;
347 case CIOCNCRYPTRETM:
348 crypt_ret = (struct cryptret *)data;
349 count = crypt_ret->count;
350 crypt_res = kmem_alloc((count * sizeof(struct crypt_result)),
351 KM_SLEEP);
352 error = copyin(crypt_ret->results, crypt_res,
353 (count * sizeof(struct crypt_result)));
354 if (error)
355 goto reterr;
356 crypt_ret->count = cryptodev_getmstatus(fcr, crypt_res,
357 crypt_ret->count);
358 /* sanity check count */
359 if (crypt_ret->count > count) {
360 printf("%s.%d: error returned count %zd > original "
361 " count %zd\n",
362 __FILE__, __LINE__, crypt_ret->count, count);
363 crypt_ret->count = count;
364
365 }
366 error = copyout(crypt_res, crypt_ret->results,
367 (crypt_ret->count * sizeof(struct crypt_result)));
368 reterr:
369 kmem_free(crypt_res, (count * sizeof(struct crypt_result)));
370 break;
371 case CIOCNCRYPTRET:
372 error = cryptodev_getstatus(fcr, (struct crypt_result *)data);
373 break;
374 default:
375 /* Check for backward compatible commands */
376 error = ocryptof_ioctl(fp, cmd, data);
377 }
378 return error;
379 }
380
381 int
382 cryptodev_op(struct csession *cse, struct crypt_op *cop, struct lwp *l)
383 {
384 struct cryptop *crp = NULL;
385 struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
386 int error;
387 int iov_len = cop->len;
388 int flags=0;
389 int dst_len; /* copyout size */
390
391 if (cop->len > 256*1024-4)
392 return E2BIG;
393
394 if (cse->txform) {
395 if (cop->len == 0 || (cop->len % cse->txform->blocksize) != 0)
396 return EINVAL;
397 }
398
399 DPRINTF(("cryptodev_op[%d]: iov_len %d\n", (uint32_t)cse->sid, iov_len));
400 if ((cse->tcomp) && cop->dst_len) {
401 if (iov_len < cop->dst_len) {
402 /* Need larger iov to deal with decompress */
403 iov_len = cop->dst_len;
404 }
405 DPRINTF(("cryptodev_op: iov_len -> %d for decompress\n", iov_len));
406 }
407
408 (void)memset(&cse->uio, 0, sizeof(cse->uio));
409 cse->uio.uio_iovcnt = 1;
410 cse->uio.uio_resid = 0;
411 cse->uio.uio_rw = UIO_WRITE;
412 cse->uio.uio_iov = cse->iovec;
413 UIO_SETUP_SYSSPACE(&cse->uio);
414 memset(&cse->iovec, 0, sizeof(cse->iovec));
415
416 /* the iov needs to be big enough to handle the uncompressed
417 * data.... */
418 cse->uio.uio_iov[0].iov_len = iov_len;
419 cse->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
420 cse->uio.uio_resid = cse->uio.uio_iov[0].iov_len;
421 DPRINTF(("cryptodev_op[%d]: uio.iov_base %p malloced %d bytes\n",
422 (uint32_t)cse->sid, cse->uio.uio_iov[0].iov_base, iov_len));
423
424 crp = crypto_getreq((cse->tcomp != NULL) + (cse->txform != NULL) + (cse->thash != NULL));
425 if (crp == NULL) {
426 error = ENOMEM;
427 goto bail;
428 }
429 DPRINTF(("cryptodev_op[%d]: crp %p\n", (uint32_t)cse->sid, crp));
430
431 /* crds are always ordered tcomp, thash, then txform */
432 /* with optional missing links */
433
434 /* XXX: If we're going to compress then hash or encrypt, we need
435 * to be able to pass on the new size of the data.
436 */
437
438 if (cse->tcomp) {
439 crdc = crp->crp_desc;
440 }
441
442 if (cse->thash) {
443 crda = crdc ? crdc->crd_next : crp->crp_desc;
444 if (cse->txform && crda)
445 crde = crda->crd_next;
446 } else {
447 if (cse->txform) {
448 crde = crdc ? crdc->crd_next : crp->crp_desc;
449 } else if (!cse->tcomp) {
450 error = EINVAL;
451 goto bail;
452 }
453 }
454
455 DPRINTF(("ocf[%d]: iov_len %d, cop->len %d\n",
456 (uint32_t)cse->sid,
457 cse->uio.uio_iov[0].iov_len,
458 cop->len));
459
460 if ((error = copyin(cop->src, cse->uio.uio_iov[0].iov_base, cop->len)))
461 {
462 printf("copyin failed %s %d \n", (char *)cop->src, error);
463 goto bail;
464 }
465
466 if (crdc) {
467 switch (cop->op) {
468 case COP_COMP:
469 crdc->crd_flags |= CRD_F_COMP;
470 break;
471 case COP_DECOMP:
472 crdc->crd_flags &= ~CRD_F_COMP;
473 break;
474 default:
475 break;
476 }
477 /* more data to follow? */
478 if (cop->flags & COP_F_MORE) {
479 flags |= CRYPTO_F_MORE;
480 }
481 crdc->crd_len = cop->len;
482 crdc->crd_inject = 0;
483
484 crdc->crd_alg = cse->comp_alg;
485 crdc->crd_key = NULL;
486 crdc->crd_klen = 0;
487 DPRINTF(("cryptodev_op[%d]: crdc setup for comp_alg %d.\n",
488 (uint32_t)cse->sid, crdc->crd_alg));
489 }
490
491 if (crda) {
492 crda->crd_skip = 0;
493 crda->crd_len = cop->len;
494 crda->crd_inject = 0; /* ??? */
495
496 crda->crd_alg = cse->mac;
497 crda->crd_key = cse->mackey;
498 crda->crd_klen = cse->mackeylen * 8;
499 DPRINTF(("cryptodev_op: crda setup for mac %d.\n", crda->crd_alg));
500 }
501
502 if (crde) {
503 switch (cop->op) {
504 case COP_ENCRYPT:
505 crde->crd_flags |= CRD_F_ENCRYPT;
506 break;
507 case COP_DECRYPT:
508 crde->crd_flags &= ~CRD_F_ENCRYPT;
509 break;
510 default:
511 break;
512 }
513 crde->crd_len = cop->len;
514 crde->crd_inject = 0;
515
516 crde->crd_alg = cse->cipher;
517 crde->crd_key = cse->key;
518 crde->crd_klen = cse->keylen * 8;
519 DPRINTF(("cryptodev_op: crde setup for cipher %d.\n", crde->crd_alg));
520 }
521
522
523 crp->crp_ilen = cop->len;
524 /* The reqest is flagged as CRYPTO_F_USER as long as it is running
525 * in the user IOCTL thread. This flag lets us skip using the retq for
526 * the request if it completes immediately. If the request ends up being
527 * delayed or is not completed immediately the flag is removed.
528 */
529 crp->crp_flags = CRYPTO_F_IOV | (cop->flags & COP_F_BATCH) | CRYPTO_F_USER |
530 flags;
531 crp->crp_buf = (void *)&cse->uio;
532 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_cb;
533 crp->crp_sid = cse->sid;
534 crp->crp_opaque = (void *)cse;
535
536 if (cop->iv) {
537 if (crde == NULL) {
538 error = EINVAL;
539 goto bail;
540 }
541 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
542 error = EINVAL;
543 goto bail;
544 }
545 if ((error = copyin(cop->iv, cse->tmp_iv,
546 cse->txform->blocksize)))
547 goto bail;
548 (void)memcpy(crde->crd_iv, cse->tmp_iv, cse->txform->blocksize);
549 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
550 crde->crd_skip = 0;
551 } else if (crde) {
552 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
553 crde->crd_skip = 0;
554 } else {
555 crde->crd_flags |= CRD_F_IV_PRESENT;
556 crde->crd_skip = cse->txform->blocksize;
557 crde->crd_len -= cse->txform->blocksize;
558 }
559 }
560
561 if (cop->mac) {
562 if (crda == NULL) {
563 error = EINVAL;
564 goto bail;
565 }
566 crp->crp_mac=cse->tmp_mac;
567 }
568
569 /*
570 * XXX there was a comment here which said that we went to
571 * XXX splcrypto() but needed to only if CRYPTO_F_CBIMM,
572 * XXX disabled on NetBSD since 1.6O due to a race condition.
573 * XXX But crypto_dispatch went to splcrypto() itself! (And
574 * XXX now takes the crypto_mtx mutex itself). We do, however,
575 * XXX need to hold the mutex across the call to cv_wait().
576 * XXX (should we arrange for crypto_dispatch to return to
577 * XXX us with it held? it seems quite ugly to do so.)
578 */
579 #ifdef notyet
580 eagain:
581 #endif
582 error = crypto_dispatch(crp);
583 mutex_spin_enter(&crypto_mtx);
584
585 /*
586 * If the request was going to be completed by the
587 * ioctl thread then it would have been done by now.
588 * Remove the F_USER flag so crypto_done() is not confused
589 * if the crypto device calls it after this point.
590 */
591 crp->crp_flags &= ~(CRYPTO_F_USER);
592
593 switch (error) {
594 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */
595 case EAGAIN:
596 mutex_spin_exit(&crypto_mtx);
597 goto eagain;
598 break;
599 #endif
600 case 0:
601 break;
602 default:
603 DPRINTF(("cryptodev_op: not waiting, error.\n"));
604 mutex_spin_exit(&crypto_mtx);
605 goto bail;
606 }
607
608 while (!(crp->crp_flags & CRYPTO_F_DONE)) {
609 DPRINTF(("cryptodev_op[%d]: sleeping on cv %08x for crp %08x\n",
610 (uint32_t)cse->sid, (uint32_t)&crp->crp_cv,
611 (uint32_t)crp));
612 cv_wait(&crp->crp_cv, &crypto_mtx); /* XXX cv_wait_sig? */
613 }
614 if (crp->crp_flags & CRYPTO_F_ONRETQ) {
615 /* XXX this should never happen now with the CRYPTO_F_USER flag
616 * changes.
617 */
618 DPRINTF(("cryptodev_op: DONE, not woken by cryptoret.\n"));
619 (void)crypto_ret_q_remove(crp);
620 }
621 mutex_spin_exit(&crypto_mtx);
622
623 if (crp->crp_etype != 0) {
624 DPRINTF(("cryptodev_op: crp_etype %d\n", crp->crp_etype));
625 error = crp->crp_etype;
626 goto bail;
627 }
628
629 if (cse->error) {
630 DPRINTF(("cryptodev_op: cse->error %d\n", cse->error));
631 error = cse->error;
632 goto bail;
633 }
634
635 dst_len = crp->crp_ilen;
636 /* let the user know how much data was returned */
637 if (crp->crp_olen) {
638 dst_len = cop->dst_len = crp->crp_olen;
639 }
640 crp->len = dst_len;
641
642 if (cop->dst) {
643 DPRINTF(("cryptodev_op: copyout %d bytes to %p\n", dst_len, cop->dst));
644 }
645 if (cop->dst &&
646 (error = copyout(cse->uio.uio_iov[0].iov_base, cop->dst, dst_len)))
647 {
648 DPRINTF(("cryptodev_op: copyout error %d\n", error));
649 goto bail;
650 }
651
652 if (cop->mac &&
653 (error = copyout(crp->crp_mac, cop->mac, cse->thash->authsize))) {
654 DPRINTF(("cryptodev_op: mac copyout error %d\n", error));
655 goto bail;
656 }
657
658
659 bail:
660 if (crp) {
661 crypto_freereq(crp);
662 }
663 if (cse->uio.uio_iov[0].iov_base) {
664 kmem_free(cse->uio.uio_iov[0].iov_base,iov_len);
665 }
666
667 return error;
668 }
669
670 static int
671 cryptodev_cb(void *op)
672 {
673 struct cryptop *crp = (struct cryptop *) op;
674 struct csession *cse = (struct csession *)crp->crp_opaque;
675 int error = 0;
676
677 mutex_spin_enter(&crypto_mtx);
678 cse->error = crp->crp_etype;
679 if (crp->crp_etype == EAGAIN) {
680 /* always drop mutex to call dispatch routine */
681 mutex_spin_exit(&crypto_mtx);
682 error = crypto_dispatch(crp);
683 mutex_spin_enter(&crypto_mtx);
684 }
685 if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
686 cv_signal(&crp->crp_cv);
687 }
688 mutex_spin_exit(&crypto_mtx);
689 return 0;
690 }
691
692 static int
693 cryptodev_mcb(void *op)
694 {
695 struct cryptop *crp = (struct cryptop *) op;
696 struct csession *cse = (struct csession *)crp->crp_opaque;
697 int error=0;
698
699 mutex_spin_enter(&crypto_mtx);
700 cse->error = crp->crp_etype;
701 if (crp->crp_etype == EAGAIN) {
702 mutex_spin_exit(&crypto_mtx);
703 error = crypto_dispatch(crp);
704 mutex_spin_enter(&crypto_mtx);
705 }
706 if (error != 0 || (crp->crp_flags & CRYPTO_F_DONE)) {
707 cv_signal(&crp->crp_cv);
708 }
709
710 TAILQ_INSERT_TAIL(&crp->fcrp->crp_ret_mq, crp, crp_next);
711 selnotify(&crp->fcrp->sinfo, 0, 0);
712 mutex_spin_exit(&crypto_mtx);
713 return 0;
714 }
715
716 static int
717 cryptodevkey_cb(void *op)
718 {
719 struct cryptkop *krp = op;
720
721 mutex_spin_enter(&crypto_mtx);
722 cv_signal(&krp->krp_cv);
723 mutex_spin_exit(&crypto_mtx);
724 return 0;
725 }
726
727 static int
728 cryptodevkey_mcb(void *op)
729 {
730 struct cryptkop *krp = op;
731
732 mutex_spin_enter(&crypto_mtx);
733 cv_signal(&krp->krp_cv);
734 TAILQ_INSERT_TAIL(&krp->fcrp->crp_ret_mkq, krp, krp_next);
735 selnotify(&krp->fcrp->sinfo, 0, 0);
736 mutex_spin_exit(&crypto_mtx);
737 return 0;
738 }
739
/*
 * cryptodev_key: synchronously execute one asymmetric-key operation
 * (CIOCKEY).  Validates the (iparams, oparams) shape for the requested
 * operation, copies the input bignum parameters in from userspace,
 * dispatches the request, sleeps until the callback signals
 * completion, and copies the output parameters back out.  All
 * parameter buffers are zeroized before being freed since they may
 * hold private-key material.
 */
static int
cryptodev_key(struct crypt_kop *kop)
{
	struct cryptkop *krp = NULL;
	int error = EINVAL;
	int in, out, size, i;

	if (kop->crk_iparams + kop->crk_oparams > CRK_MAXPARAM)
		return EFBIG;

	in = kop->crk_iparams;
	out = kop->crk_oparams;
	/* Each operation accepts exactly one (in, out) parameter shape. */
	switch (kop->crk_op) {
	case CRK_MOD_EXP:
		if (in == 3 && out == 1)
			break;
		return EINVAL;
	case CRK_MOD_EXP_CRT:
		if (in == 6 && out == 1)
			break;
		return EINVAL;
	case CRK_DSA_SIGN:
		if (in == 5 && out == 2)
			break;
		return EINVAL;
	case CRK_DSA_VERIFY:
		if (in == 7 && out == 0)
			break;
		return EINVAL;
	case CRK_DH_COMPUTE_KEY:
		if (in == 3 && out == 1)
			break;
		return EINVAL;
	case CRK_MOD_ADD:
		if (in == 3 && out == 1)
			break;
		return EINVAL;
	case CRK_MOD_ADDINV:
		if (in == 2 && out == 1)
			break;
		return EINVAL;
	case CRK_MOD_SUB:
		if (in == 3 && out == 1)
			break;
		return EINVAL;
	case CRK_MOD_MULT:
		if (in == 3 && out == 1)
			break;
		return EINVAL;
	case CRK_MOD_MULTINV:
		if (in == 2 && out == 1)
			break;
		return EINVAL;
	case CRK_MOD:
		if (in == 2 && out == 1)
			break;
		return EINVAL;
	default:
		return EINVAL;
	}

	krp = pool_get(&cryptkop_pool, PR_WAITOK);
	(void)memset(krp, 0, sizeof *krp);
	cv_init(&krp->krp_cv, "crykdev");
	krp->krp_op = kop->crk_op;
	/* NOTE(review): dead store -- krp_status is reset to 0 below. */
	krp->krp_status = kop->crk_status;
	krp->krp_iparams = kop->crk_iparams;
	krp->krp_oparams = kop->crk_oparams;
	krp->krp_status = 0;
	krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;

	/* Record all widths, then allocate and copy in the inputs only. */
	for (i = 0; i < CRK_MAXPARAM; i++)
		krp->krp_param[i].crp_nbits = kop->crk_param[i].crp_nbits;
	for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
		size = (krp->krp_param[i].crp_nbits + 7) / 8;
		if (size == 0)
			continue;
		krp->krp_param[i].crp_p = kmem_alloc(size, KM_SLEEP);
		if (i >= krp->krp_iparams)
			continue;	/* output parameter: allocate only */
		error = copyin(kop->crk_param[i].crp_p,
		    krp->krp_param[i].crp_p, size);
		if (error)
			goto fail;
	}

	error = crypto_kdispatch(krp);
	if (error != 0) {
		goto fail;
	}

	/* Sleep until cryptodevkey_cb() signals completion. */
	mutex_spin_enter(&crypto_mtx);
	while (!(krp->krp_flags & CRYPTO_F_DONE)) {
		cv_wait(&krp->krp_cv, &crypto_mtx);	/* XXX cv_wait_sig? */
	}
	if (krp->krp_flags & CRYPTO_F_ONRETQ) {
		DPRINTF(("cryptodev_key: DONE early, not via cryptoret.\n"));
		(void)crypto_ret_kq_remove(krp);
	}
	mutex_spin_exit(&crypto_mtx);

	if (krp->krp_status != 0) {
		DPRINTF(("cryptodev_key: krp->krp_status 0x%08x\n",
		    krp->krp_status));
		error = krp->krp_status;
		goto fail;
	}

	/* Copy the computed output parameters back to userspace. */
	for (i = krp->krp_iparams; i < krp->krp_iparams + krp->krp_oparams;
	    i++) {
		size = (krp->krp_param[i].crp_nbits + 7) / 8;
		if (size == 0)
			continue;
		error = copyout(krp->krp_param[i].crp_p,
		    kop->crk_param[i].crp_p, size);
		if (error) {
			DPRINTF(("cryptodev_key: copyout oparam %d failed, "
			    "error=%d\n", i-krp->krp_iparams, error));
			goto fail;
		}
	}

fail:
	kop->crk_status = krp->krp_status;
	/* Zeroize before freeing: parameters may hold key material. */
	for (i = 0; i < CRK_MAXPARAM; i++) {
		struct crparam *kp = &(krp->krp_param[i]);
		if (krp->krp_param[i].crp_p) {
			size = (kp->crp_nbits + 7) / 8;
			KASSERT(size > 0);
			(void)memset(kp->crp_p, 0, size);
			kmem_free(kp->crp_p, size);
		}
	}
	cv_destroy(&krp->krp_cv);
	pool_put(&cryptkop_pool, krp);
	DPRINTF(("cryptodev_key: error=0x%08x\n", error));
	return error;
}
878
/* ARGSUSED */
/*
 * Close a cloned /dev/crypto descriptor: free every session still
 * open on it, tear down the select state, and return the per-fd
 * structure to its pool.
 * NOTE(review): requests still sitting on crp_ret_mq/crp_ret_mkq are
 * not drained here -- presumably none can be outstanding at close
 * time; verify against the batched-op callers.
 */
static int
cryptof_close(struct file *fp)
{
	struct fcrypt *fcr = fp->f_data;
	struct csession *cse;

	mutex_spin_enter(&crypto_mtx);
	while ((cse = TAILQ_FIRST(&fcr->csessions))) {
		TAILQ_REMOVE(&fcr->csessions, cse, next);
		(void)csefree(cse);
	}
	seldestroy(&fcr->sinfo);
	fp->f_data = NULL;
	mutex_spin_exit(&crypto_mtx);

	pool_put(&fcrpl, fcr);
	return 0;
}
898
/* needed for compatibility module */
/* Exported wrapper around csefind(); caller must hold crypto_mtx. */
struct csession *cryptodev_csefind(struct fcrypt *fcr, u_int ses)
{
	return csefind(fcr, ses);
}
904
905 /* csefind: call with crypto_mtx held. */
906 static struct csession *
907 csefind(struct fcrypt *fcr, u_int ses)
908 {
909 struct csession *cse, *cnext, *ret = NULL;
910
911 KASSERT(mutex_owned(&crypto_mtx));
912 TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext)
913 if (cse->ses == ses)
914 ret = cse;
915
916 return ret;
917 }
918
919 /* csedelete: call with crypto_mtx held. */
920 static int
921 csedelete(struct fcrypt *fcr, struct csession *cse_del)
922 {
923 struct csession *cse, *cnext;
924 int ret = 0;
925
926 KASSERT(mutex_owned(&crypto_mtx));
927 TAILQ_FOREACH_SAFE(cse, &fcr->csessions, next, cnext) {
928 if (cse == cse_del) {
929 TAILQ_REMOVE(&fcr->csessions, cse, next);
930 ret = 1;
931 }
932 }
933 return ret;
934 }
935
936 /* cseadd: call with crypto_mtx held. */
937 static struct csession *
938 cseadd(struct fcrypt *fcr, struct csession *cse)
939 {
940 KASSERT(mutex_owned(&crypto_mtx));
941 /* don't let session ID wrap! */
942 if (fcr->sesn + 1 == 0) return NULL;
943 TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
944 cse->ses = fcr->sesn++;
945 return cse;
946 }
947
948 /* csecreate: call with crypto_mtx held. */
949 static struct csession *
950 csecreate(struct fcrypt *fcr, u_int64_t sid, void *key, u_int64_t keylen,
951 void *mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
952 u_int32_t comp_alg, struct enc_xform *txform, struct auth_hash *thash,
953 struct comp_algo *tcomp)
954 {
955 struct csession *cse;
956
957 KASSERT(mutex_owned(&crypto_mtx));
958 cse = pool_get(&csepl, PR_NOWAIT);
959 if (cse == NULL)
960 return NULL;
961 cse->key = key;
962 cse->keylen = keylen/8;
963 cse->mackey = mackey;
964 cse->mackeylen = mackeylen/8;
965 cse->sid = sid;
966 cse->cipher = cipher;
967 cse->mac = mac;
968 cse->comp_alg = comp_alg;
969 cse->txform = txform;
970 cse->thash = thash;
971 cse->tcomp = tcomp;
972 cse->error = 0;
973 if (cseadd(fcr, cse))
974 return cse;
975 else {
976 pool_put(&csepl, cse);
977 return NULL;
978 }
979 }
980
981 /* csefree: call with crypto_mtx held. */
982 static int
983 csefree(struct csession *cse)
984 {
985 int error;
986
987 KASSERT(mutex_owned(&crypto_mtx));
988 error = crypto_freesession(cse->sid);
989 if (cse->key)
990 free(cse->key, M_XDATA);
991 if (cse->mackey)
992 free(cse->mackey, M_XDATA);
993 pool_put(&csepl, cse);
994 return error;
995 }
996
/*
 * open(2) on the master /dev/crypto node: allocate per-descriptor
 * state and clone a new file descriptor backed by cryptofops.
 * Refuses with ENXIO when userlevel crypto is disabled via sysctl.
 */
static int
cryptoopen(dev_t dev, int flag, int mode,
    struct lwp *l)
{
	file_t *fp;
	struct fcrypt *fcr;
	int fd, error;

	if (crypto_usercrypto == 0)
		return ENXIO;

	if ((error = fd_allocfile(&fp, &fd)) != 0)
		return error;

	/* PR_WAITOK allocation happens before taking the spin mutex. */
	fcr = pool_get(&fcrpl, PR_WAITOK);
	mutex_spin_enter(&crypto_mtx);
	TAILQ_INIT(&fcr->csessions);
	TAILQ_INIT(&fcr->crp_ret_mq);
	TAILQ_INIT(&fcr->crp_ret_mkq);
	selinit(&fcr->sinfo);
	/*
	 * Don't ever return session 0, to allow detection of
	 * failed creation attempts with multi-create ioctl.
	 */
	fcr->sesn = 1;
	fcr->requestid = 1;
	mutex_spin_exit(&crypto_mtx);
	return fd_clone(fp, fd, flag, &cryptofops, fcr);
}
1026
/* Reading the master /dev/crypto node is not supported. */
static int
cryptoread(dev_t dev, struct uio *uio, int ioflag)
{
	return EIO;
}
1032
/* Writing the master /dev/crypto node is not supported. */
static int
cryptowrite(dev_t dev, struct uio *uio, int ioflag)
{
	return EIO;
}
1038
/* poll/select on the master node: always reports "not ready". */
int
cryptoselect(dev_t dev, int rw, struct lwp *l)
{
	return 0;
}
1044
/*static*/
/*
 * Character-device switch for the master /dev/crypto node.  Only open
 * is interesting: it clones a per-process descriptor backed by
 * cryptofops.  read/write report EIO via the stubs above.
 */
struct cdevsw crypto_cdevsw = {
	/* open */	cryptoopen,
	/* close */	noclose,
	/* read */	cryptoread,
	/* write */	cryptowrite,
	/* ioctl */	noioctl,
	/* ttstop?*/	nostop,
	/* ??*/		notty,
	/* poll */	cryptoselect /*nopoll*/,
	/* mmap */	nommap,
	/* kqfilter */	nokqfilter,
	/* type */	D_OTHER,
};
1059
1060 int
1061 cryptodev_mop(struct fcrypt *fcr,
1062 struct crypt_n_op * cnop,
1063 int count, struct lwp *l)
1064 {
1065 struct cryptop *crp = NULL;
1066 struct cryptodesc *crde = NULL, *crda = NULL, *crdc = NULL;
1067 int req, error=0;
1068 struct csession *cse;
1069 int flags=0;
1070 int iov_len;
1071
1072 for (req = 0; req < count; req++) {
1073 mutex_spin_enter(&crypto_mtx);
1074 cse = csefind(fcr, cnop[req].ses);
1075 if (cse == NULL) {
1076 DPRINTF(("csefind failed\n"));
1077 cnop[req].status = EINVAL;
1078 mutex_spin_exit(&crypto_mtx);
1079 continue;
1080 }
1081 mutex_spin_exit(&crypto_mtx);
1082
1083 if (cnop[req].len > 256*1024-4) {
1084 DPRINTF(("length failed\n"));
1085 cnop[req].status = EINVAL;
1086 continue;
1087 }
1088 if (cse->txform) {
1089 if (cnop[req].len == 0 ||
1090 (cnop[req].len % cse->txform->blocksize) != 0) {
1091 cnop[req].status = EINVAL;
1092 continue;
1093 }
1094 }
1095
1096 crp = crypto_getreq((cse->txform != NULL) +
1097 (cse->thash != NULL) +
1098 (cse->tcomp != NULL));
1099 if (crp == NULL) {
1100 cnop[req].status = ENOMEM;
1101 goto bail;
1102 }
1103
1104 iov_len = cnop[req].len;
1105 /* got a compression/decompression max size? */
1106 if ((cse->tcomp) && cnop[req].dst_len) {
1107 if (iov_len < cnop[req].dst_len) {
1108 /* Need larger iov to deal with decompress */
1109 iov_len = cnop[req].dst_len;
1110 }
1111 DPRINTF(("cryptodev_mop: iov_len -> %d for decompress\n", iov_len));
1112 }
1113
1114 (void)memset(&crp->uio, 0, sizeof(crp->uio));
1115 crp->uio.uio_iovcnt = 1;
1116 crp->uio.uio_resid = 0;
1117 crp->uio.uio_rw = UIO_WRITE;
1118 crp->uio.uio_iov = crp->iovec;
1119 UIO_SETUP_SYSSPACE(&crp->uio);
1120 memset(&crp->iovec, 0, sizeof(crp->iovec));
1121 crp->uio.uio_iov[0].iov_len = iov_len;
1122 DPRINTF(("cryptodev_mop: kmem_alloc(%d) for iov \n", iov_len));
1123 crp->uio.uio_iov[0].iov_base = kmem_alloc(iov_len, KM_SLEEP);
1124 crp->uio.uio_resid = crp->uio.uio_iov[0].iov_len;
1125
1126 if (cse->tcomp) {
1127 crdc = crp->crp_desc;
1128 }
1129
1130 if (cse->thash) {
1131 crda = crdc ? crdc->crd_next : crp->crp_desc;
1132 if (cse->txform && crda)
1133 crde = crda->crd_next;
1134 } else {
1135 if (cse->txform) {
1136 crde = crdc ? crdc->crd_next : crp->crp_desc;
1137 } else if (!cse->tcomp) {
1138 error = EINVAL;
1139 goto bail;
1140 }
1141 }
1142
1143 if ((copyin(cnop[req].src,
1144 crp->uio.uio_iov[0].iov_base, cnop[req].len))) {
1145 cnop[req].status = EINVAL;
1146 goto bail;
1147 }
1148
1149 if (crdc) {
1150 switch (cnop[req].op) {
1151 case COP_COMP:
1152 crdc->crd_flags |= CRD_F_COMP;
1153 break;
1154 case COP_DECOMP:
1155 crdc->crd_flags &= ~CRD_F_COMP;
1156 break;
1157 default:
1158 break;
1159 }
1160 /* more data to follow? */
1161 if (cnop[req].flags & COP_F_MORE) {
1162 flags |= CRYPTO_F_MORE;
1163 }
1164 crdc->crd_len = cnop[req].len;
1165 crdc->crd_inject = 0;
1166
1167 crdc->crd_alg = cse->comp_alg;
1168 crdc->crd_key = NULL;
1169 crdc->crd_klen = 0;
1170 DPRINTF(("cryptodev_mop[%d]: crdc setup for comp_alg %d"
1171 " len %d.\n",
1172 (uint32_t)cse->sid, crdc->crd_alg,
1173 crdc->crd_len));
1174 }
1175
1176 if (crda) {
1177 crda->crd_skip = 0;
1178 crda->crd_len = cnop[req].len;
1179 crda->crd_inject = 0; /* ??? */
1180
1181 crda->crd_alg = cse->mac;
1182 crda->crd_key = cse->mackey;
1183 crda->crd_klen = cse->mackeylen * 8;
1184 }
1185
1186 if (crde) {
1187 if (cnop[req].op == COP_ENCRYPT)
1188 crde->crd_flags |= CRD_F_ENCRYPT;
1189 else
1190 crde->crd_flags &= ~CRD_F_ENCRYPT;
1191 crde->crd_len = cnop[req].len;
1192 crde->crd_inject = 0;
1193
1194 crde->crd_alg = cse->cipher;
1195 #ifdef notyet /* XXX must notify h/w driver new key, drain */
1196 if(cnop[req].key && cnop[req].keylen) {
1197 crde->crd_key = malloc(cnop[req].keylen,
1198 M_XDATA, M_WAITOK);
1199 if((error = copyin(cnop[req].key,
1200 crde->crd_key, cnop[req].keylen))) {
1201 cnop[req].status = EINVAL;
1202 goto bail;
1203 }
1204 crde->crd_klen = cnop[req].keylen * 8;
1205 } else { ... }
1206 #endif
1207 crde->crd_key = cse->key;
1208 crde->crd_klen = cse->keylen * 8;
1209 }
1210
1211 crp->crp_ilen = cnop[req].len;
1212 crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM |
1213 (cnop[req].flags & COP_F_BATCH) | flags;
1214 crp->crp_buf = (void *)&crp->uio;
1215 crp->crp_callback = (int (*) (struct cryptop *)) cryptodev_mcb;
1216 crp->crp_sid = cse->sid;
1217 crp->crp_opaque = (void *)cse;
1218 crp->fcrp = fcr;
1219 crp->dst = cnop[req].dst;
1220 crp->len = cnop[req].len; /* input len, iov may be larger */
1221 crp->mac = cnop[req].mac;
1222 DPRINTF(("cryptodev_mop: iov_base %p dst %p len %d mac %p\n",
1223 crp->uio.uio_iov[0].iov_base, crp->dst, crp->len,
1224 crp->mac));
1225
1226 if (cnop[req].iv) {
1227 if (crde == NULL) {
1228 cnop[req].status = EINVAL;
1229 goto bail;
1230 }
1231 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1232 cnop[req].status = EINVAL;
1233 goto bail;
1234 }
1235 if ((error = copyin(cnop[req].iv, crp->tmp_iv,
1236 cse->txform->blocksize))) {
1237 cnop[req].status = EINVAL;
1238 goto bail;
1239 }
1240 (void)memcpy(crde->crd_iv, crp->tmp_iv,
1241 cse->txform->blocksize);
1242 crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
1243 crde->crd_skip = 0;
1244 } else if (crde) {
1245 if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
1246 crde->crd_skip = 0;
1247 } else {
1248 crde->crd_flags |= CRD_F_IV_PRESENT;
1249 crde->crd_skip = cse->txform->blocksize;
1250 crde->crd_len -= cse->txform->blocksize;
1251 }
1252 }
1253
1254 if (cnop[req].mac) {
1255 if (crda == NULL) {
1256 cnop[req].status = EINVAL;
1257 goto bail;
1258 }
1259 crp->crp_mac=cse->tmp_mac;
1260 }
1261 cnop[req].reqid = atomic_inc_32_nv(&(fcr->requestid));
1262 crp->crp_reqid = cnop[req].reqid;
1263 crp->crp_usropaque = cnop[req].opaque;
1264 #ifdef notyet
1265 eagain:
1266 #endif
1267 cnop[req].status = crypto_dispatch(crp);
1268 mutex_spin_enter(&crypto_mtx); /* XXX why mutex? */
1269
1270 switch (cnop[req].status) {
1271 #ifdef notyet /* don't loop forever -- but EAGAIN not possible here yet */
1272 case EAGAIN:
1273 mutex_spin_exit(&crypto_mtx);
1274 goto eagain;
1275 break;
1276 #endif
1277 case 0:
1278 break;
1279 default:
1280 DPRINTF(("cryptodev_op: not waiting, error.\n"));
1281 mutex_spin_exit(&crypto_mtx);
1282 goto bail;
1283 }
1284
1285 mutex_spin_exit(&crypto_mtx);
1286 bail:
1287 if (cnop[req].status) {
1288 if (crp) {
1289 if (crp->uio.uio_iov[0].iov_base) {
1290 kmem_free(crp->uio.uio_iov[0].iov_base,
1291 crp->uio.uio_iov[0].iov_len);
1292 }
1293 crypto_freereq(crp);
1294 }
1295 error = 0;
1296 }
1297 }
1298 return error;
1299 }
1300
1301 static int
1302 cryptodev_mkey(struct fcrypt *fcr, struct crypt_n_kop *kop, int count)
1303 {
1304 struct cryptkop *krp = NULL;
1305 int error = EINVAL;
1306 int in, out, size, i, req;
1307
1308 for (req = 0; req < count; req++) {
1309 if (kop[req].crk_iparams + kop[req].crk_oparams > CRK_MAXPARAM)
1310 return EFBIG;
1311
1312 in = kop[req].crk_iparams;
1313 out = kop[req].crk_oparams;
1314 switch (kop[req].crk_op) {
1315 case CRK_MOD_EXP:
1316 if (in == 3 && out == 1)
1317 break;
1318 kop[req].crk_status = EINVAL;
1319 continue;
1320 case CRK_MOD_EXP_CRT:
1321 if (in == 6 && out == 1)
1322 break;
1323 kop[req].crk_status = EINVAL;
1324 continue;
1325 case CRK_DSA_SIGN:
1326 if (in == 5 && out == 2)
1327 break;
1328 kop[req].crk_status = EINVAL;
1329 continue;
1330 case CRK_DSA_VERIFY:
1331 if (in == 7 && out == 0)
1332 break;
1333 kop[req].crk_status = EINVAL;
1334 continue;
1335 case CRK_DH_COMPUTE_KEY:
1336 if (in == 3 && out == 1)
1337 break;
1338 kop[req].crk_status = EINVAL;
1339 continue;
1340 case CRK_MOD_ADD:
1341 if (in == 3 && out == 1)
1342 break;
1343 kop[req].crk_status = EINVAL;
1344 continue;
1345 case CRK_MOD_ADDINV:
1346 if (in == 2 && out == 1)
1347 break;
1348 kop[req].crk_status = EINVAL;
1349 continue;
1350 case CRK_MOD_SUB:
1351 if (in == 3 && out == 1)
1352 break;
1353 kop[req].crk_status = EINVAL;
1354 continue;
1355 case CRK_MOD_MULT:
1356 if (in == 3 && out == 1)
1357 break;
1358 kop[req].crk_status = EINVAL;
1359 continue;
1360 case CRK_MOD_MULTINV:
1361 if (in == 2 && out == 1)
1362 break;
1363 kop[req].crk_status = EINVAL;
1364 continue;
1365 case CRK_MOD:
1366 if (in == 2 && out == 1)
1367 break;
1368 kop[req].crk_status = EINVAL;
1369 continue;
1370 default:
1371 kop[req].crk_status = EINVAL;
1372 continue;
1373 }
1374
1375 krp = pool_get(&cryptkop_pool, PR_WAITOK);
1376 (void)memset(krp, 0, sizeof *krp);
1377 cv_init(&krp->krp_cv, "crykdev");
1378 krp->krp_op = kop[req].crk_op;
1379 krp->krp_status = kop[req].crk_status;
1380 krp->krp_iparams = kop[req].crk_iparams;
1381 krp->krp_oparams = kop[req].crk_oparams;
1382 krp->krp_status = 0;
1383 krp->krp_callback =
1384 (int (*) (struct cryptkop *)) cryptodevkey_mcb;
1385 (void)memcpy(krp->crk_param, kop[req].crk_param,
1386 sizeof(kop[req].crk_param));
1387
1388 krp->krp_flags = CRYPTO_F_CBIMM;
1389
1390 for (i = 0; i < CRK_MAXPARAM; i++)
1391 krp->krp_param[i].crp_nbits =
1392 kop[req].crk_param[i].crp_nbits;
1393 for (i = 0; i < krp->krp_iparams + krp->krp_oparams; i++) {
1394 size = (krp->krp_param[i].crp_nbits + 7) / 8;
1395 if (size == 0)
1396 continue;
1397 krp->krp_param[i].crp_p =
1398 kmem_alloc(size, KM_SLEEP);
1399 if (i >= krp->krp_iparams)
1400 continue;
1401 kop[req].crk_status =
1402 copyin(kop[req].crk_param[i].crp_p,
1403 krp->krp_param[i].crp_p, size);
1404 if (kop[req].crk_status)
1405 goto fail;
1406 }
1407 krp->fcrp = fcr;
1408
1409 kop[req].crk_reqid = atomic_inc_32_nv(&(fcr->requestid));
1410 krp->krp_reqid = kop[req].crk_reqid;
1411 krp->krp_usropaque = kop[req].crk_opaque;
1412
1413 kop[req].crk_status = crypto_kdispatch(krp);
1414 if (kop[req].crk_status != 0) {
1415 goto fail;
1416 }
1417
1418 fail:
1419 if(kop[req].crk_status) {
1420 if (krp) {
1421 kop[req].crk_status = krp->krp_status;
1422 for (i = 0; i < CRK_MAXPARAM; i++) {
1423 struct crparam *kp =
1424 &(krp->krp_param[i]);
1425 if (kp->crp_p) {
1426 size = (kp->crp_nbits + 7) / 8;
1427 KASSERT(size > 0);
1428 memset(kp->crp_p, 0, size);
1429 kmem_free(kp->crp_p, size);
1430 }
1431 }
1432 cv_destroy(&krp->krp_cv);
1433 pool_put(&cryptkop_pool, krp);
1434 }
1435 }
1436 error = 0;
1437 }
1438 DPRINTF(("cryptodev_key: error=0x%08x\n", error));
1439 return error;
1440 }
1441
1442 int
1443 cryptodev_session(struct fcrypt *fcr, struct session_op *sop)
1444 {
1445 struct cryptoini cria, crie;
1446 struct cryptoini cric; /* compressor */
1447 struct cryptoini *crihead = NULL;
1448 struct enc_xform *txform = NULL;
1449 struct auth_hash *thash = NULL;
1450 struct comp_algo *tcomp = NULL;
1451 struct csession *cse;
1452 u_int64_t sid;
1453 int error = 0;
1454
1455 DPRINTF(("cryptodev_session() cipher=%d, mac=%d\n", sop->cipher, sop->mac));
1456
1457 /* XXX there must be a way to not embed the list of xforms here */
1458 switch (sop->cipher) {
1459 case 0:
1460 break;
1461 case CRYPTO_DES_CBC:
1462 txform = &enc_xform_des;
1463 break;
1464 case CRYPTO_3DES_CBC:
1465 txform = &enc_xform_3des;
1466 break;
1467 case CRYPTO_BLF_CBC:
1468 txform = &enc_xform_blf;
1469 break;
1470 case CRYPTO_CAST_CBC:
1471 txform = &enc_xform_cast5;
1472 break;
1473 case CRYPTO_SKIPJACK_CBC:
1474 txform = &enc_xform_skipjack;
1475 break;
1476 case CRYPTO_AES_CBC:
1477 txform = &enc_xform_rijndael128;
1478 break;
1479 case CRYPTO_NULL_CBC:
1480 txform = &enc_xform_null;
1481 break;
1482 case CRYPTO_ARC4:
1483 txform = &enc_xform_arc4;
1484 break;
1485 default:
1486 DPRINTF(("Invalid cipher %d\n", sop->cipher));
1487 return EINVAL;
1488 }
1489
1490 switch (sop->comp_alg) {
1491 case 0:
1492 break;
1493 case CRYPTO_DEFLATE_COMP:
1494 tcomp = &comp_algo_deflate;
1495 break;
1496 case CRYPTO_GZIP_COMP:
1497 tcomp = &comp_algo_gzip;
1498 DPRINTF(("cryptodev_session() tcomp for GZIP\n"));
1499 break;
1500 default:
1501 DPRINTF(("Invalid compression alg %d\n", sop->comp_alg));
1502 return EINVAL;
1503 }
1504
1505 switch (sop->mac) {
1506 case 0:
1507 break;
1508 case CRYPTO_MD5_HMAC:
1509 thash = &auth_hash_hmac_md5;
1510 break;
1511 case CRYPTO_SHA1_HMAC:
1512 thash = &auth_hash_hmac_sha1;
1513 break;
1514 case CRYPTO_MD5_HMAC_96:
1515 thash = &auth_hash_hmac_md5_96;
1516 break;
1517 case CRYPTO_SHA1_HMAC_96:
1518 thash = &auth_hash_hmac_sha1_96;
1519 break;
1520 case CRYPTO_SHA2_HMAC:
1521 /* XXX switching on key length seems questionable */
1522 if (sop->mackeylen == auth_hash_hmac_sha2_256.keysize) {
1523 thash = &auth_hash_hmac_sha2_256;
1524 } else if (sop->mackeylen == auth_hash_hmac_sha2_384.keysize) {
1525 thash = &auth_hash_hmac_sha2_384;
1526 } else if (sop->mackeylen == auth_hash_hmac_sha2_512.keysize) {
1527 thash = &auth_hash_hmac_sha2_512;
1528 } else {
1529 DPRINTF(("Invalid mackeylen %d\n", sop->mackeylen));
1530 return EINVAL;
1531 }
1532 break;
1533 case CRYPTO_RIPEMD160_HMAC:
1534 thash = &auth_hash_hmac_ripemd_160;
1535 break;
1536 case CRYPTO_RIPEMD160_HMAC_96:
1537 thash = &auth_hash_hmac_ripemd_160_96;
1538 break;
1539 case CRYPTO_MD5:
1540 thash = &auth_hash_md5;
1541 break;
1542 case CRYPTO_SHA1:
1543 thash = &auth_hash_sha1;
1544 break;
1545 case CRYPTO_NULL_HMAC:
1546 thash = &auth_hash_null;
1547 break;
1548 default:
1549 DPRINTF(("Invalid mac %d\n", sop->mac));
1550 return EINVAL;
1551 }
1552
1553 memset(&crie, 0, sizeof(crie));
1554 memset(&cria, 0, sizeof(cria));
1555 memset(&cric, 0, sizeof(cric));
1556
1557 if (tcomp) {
1558 cric.cri_alg = tcomp->type;
1559 cric.cri_klen = 0;
1560 DPRINTF(("tcomp->type = %d\n", tcomp->type));
1561
1562 crihead = &cric;
1563 if (thash) {
1564 cric.cri_next = &cria;
1565 } else if (txform) {
1566 cric.cri_next = &crie;
1567 }
1568 }
1569
1570 if (txform) {
1571 crie.cri_alg = txform->type;
1572 crie.cri_klen = sop->keylen * 8;
1573 if (sop->keylen > txform->maxkey ||
1574 sop->keylen < txform->minkey) {
1575 DPRINTF(("keylen %d not in [%d,%d]\n",
1576 sop->keylen, txform->minkey, txform->maxkey));
1577 error = EINVAL;
1578 goto bail;
1579 }
1580
1581 crie.cri_key = malloc(crie.cri_klen / 8, M_XDATA, M_WAITOK);
1582 if ((error = copyin(sop->key, crie.cri_key, crie.cri_klen / 8)))
1583 goto bail;
1584 if (!crihead) {
1585 crihead = &crie;
1586 }
1587 }
1588
1589 if (thash) {
1590 cria.cri_alg = thash->type;
1591 cria.cri_klen = sop->mackeylen * 8;
1592 if (sop->mackeylen != thash->keysize) {
1593 DPRINTF(("mackeylen %d != keysize %d\n",
1594 sop->mackeylen, thash->keysize));
1595 error = EINVAL;
1596 goto bail;
1597 }
1598 if (cria.cri_klen) {
1599 cria.cri_key = malloc(cria.cri_klen / 8, M_XDATA,
1600 M_WAITOK);
1601 if ((error = copyin(sop->mackey, cria.cri_key,
1602 cria.cri_klen / 8))) {
1603 goto bail;
1604 }
1605 }
1606 if (txform)
1607 cria.cri_next = &crie; /* XXX forces enc then hash? */
1608 if (!crihead) {
1609 crihead = &cria;
1610 }
1611 }
1612
1613 /* crypto_newsession requires that we hold the mutex. */
1614 mutex_spin_enter(&crypto_mtx);
1615 error = crypto_newsession(&sid, crihead, crypto_devallowsoft);
1616 if (!error) {
1617 DPRINTF(("cyrptodev_session: got session %d\n", (uint32_t)sid));
1618 cse = csecreate(fcr, sid, crie.cri_key, crie.cri_klen,
1619 cria.cri_key, cria.cri_klen, (txform ? sop->cipher : 0), sop->mac,
1620 (tcomp ? sop->comp_alg : 0), txform, thash, tcomp);
1621 if (cse != NULL) {
1622 sop->ses = cse->ses;
1623 } else {
1624 DPRINTF(("csecreate failed\n"));
1625 crypto_freesession(sid);
1626 error = EINVAL;
1627 }
1628 } else {
1629 DPRINTF(("SIOCSESSION violates kernel parameters %d\n",
1630 error));
1631 }
1632 mutex_spin_exit(&crypto_mtx);
1633 bail:
1634 if (error) {
1635 if (crie.cri_key) {
1636 memset(crie.cri_key, 0, crie.cri_klen / 8);
1637 free(crie.cri_key, M_XDATA);
1638 }
1639 if (cria.cri_key) {
1640 memset(cria.cri_key, 0, cria.cri_klen / 8);
1641 free(cria.cri_key, M_XDATA);
1642 }
1643 }
1644 return error;
1645 }
1646
1647 int
1648 cryptodev_msession(struct fcrypt *fcr, struct session_n_op *sn_ops,
1649 int count)
1650 {
1651 int i;
1652
1653 for (i = 0; i < count; i++, sn_ops++) {
1654 struct session_op s_op;
1655 s_op.cipher = sn_ops->cipher;
1656 s_op.mac = sn_ops->mac;
1657 s_op.keylen = sn_ops->keylen;
1658 s_op.key = sn_ops->key;
1659 s_op.mackeylen = sn_ops->mackeylen;
1660 s_op.mackey = sn_ops->mackey;
1661
1662 sn_ops->status = cryptodev_session(fcr, &s_op);
1663 sn_ops->ses = s_op.ses;
1664 }
1665
1666 return 0;
1667 }
1668
1669 static int
1670 cryptodev_msessionfin(struct fcrypt *fcr, int count, u_int32_t *sesid)
1671 {
1672 struct csession *cse;
1673 int req, error = 0;
1674
1675 mutex_spin_enter(&crypto_mtx);
1676 for(req = 0; req < count; req++) {
1677 cse = csefind(fcr, sesid[req]);
1678 if (cse == NULL)
1679 continue;
1680 csedelete(fcr, cse);
1681 error = csefree(cse);
1682 }
1683 mutex_spin_exit(&crypto_mtx);
1684 return 0;
1685 }
1686
1687 /*
1688 * collect as many completed requests as are availble, or count completed
1689 * requests whichever is less.
1690 * return the number of requests.
1691 */
1692 static int
1693 cryptodev_getmstatus(struct fcrypt *fcr, struct crypt_result *crypt_res,
1694 int count)
1695 {
1696 struct cryptop *crp = NULL;
1697 struct cryptkop *krp = NULL;
1698 struct csession *cse;
1699 int i, size, req = 0;
1700 int completed=0;
1701
1702 /* On queue so nobody else can grab them
1703 * and copyout can be delayed-- no locking */
1704 TAILQ_HEAD(, cryptop) crp_delfree_q =
1705 TAILQ_HEAD_INITIALIZER(crp_delfree_q);
1706 TAILQ_HEAD(, cryptkop) krp_delfree_q =
1707 TAILQ_HEAD_INITIALIZER(krp_delfree_q);
1708
1709 /* at this point we do not know which response user is requesting for
1710 * (symmetric or asymmetric) so we copyout one from each i.e if the
1711 * count is 2 then 1 from symmetric and 1 from asymmetric queue and
1712 * if 3 then 2 symmetric and 1 asymmetric and so on */
1713
1714 /* pull off a list of requests while protected from changes */
1715 mutex_spin_enter(&crypto_mtx);
1716 while (req < count) {
1717 crp = TAILQ_FIRST(&fcr->crp_ret_mq);
1718 if (crp) {
1719 TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);
1720 TAILQ_INSERT_TAIL(&crp_delfree_q, crp, crp_next);
1721 cse = (struct csession *)crp->crp_opaque;
1722
1723 /* see if the session is still valid */
1724 cse = csefind(fcr, cse->ses);
1725 if (cse != NULL) {
1726 crypt_res[req].status = 0;
1727 } else {
1728 DPRINTF(("csefind failed\n"));
1729 crypt_res[req].status = EINVAL;
1730 }
1731 req++;
1732 }
1733 if(req < count) {
1734 crypt_res[req].status = 0;
1735 krp = TAILQ_FIRST(&fcr->crp_ret_mkq);
1736 if (krp) {
1737 TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
1738 TAILQ_INSERT_TAIL(&krp_delfree_q, krp, krp_next);
1739 req++;
1740 }
1741 }
1742 }
1743 mutex_spin_exit(&crypto_mtx);
1744
1745 /* now do all the work outside the mutex */
1746 for(req=0; req < count ;) {
1747 crp = TAILQ_FIRST(&crp_delfree_q);
1748 if (crp) {
1749 if (crypt_res[req].status != 0) {
1750 /* csefind failed during collection */
1751 goto bail;
1752 }
1753 cse = (struct csession *)crp->crp_opaque;
1754 crypt_res[req].reqid = crp->crp_reqid;
1755 crypt_res[req].opaque = crp->crp_usropaque;
1756 completed++;
1757
1758 if (crp->crp_etype != 0) {
1759 crypt_res[req].status = crp->crp_etype;
1760 goto bail;
1761 }
1762
1763 if (cse->error) {
1764 crypt_res[req].status = cse->error;
1765 goto bail;
1766 }
1767
1768 if (crp->dst && (crypt_res[req].status =
1769 copyout(crp->uio.uio_iov[0].iov_base, crp->dst,
1770 crp->len)))
1771 goto bail;
1772
1773 if (crp->mac && (crypt_res[req].status =
1774 copyout(crp->crp_mac, crp->mac,
1775 cse->thash->authsize)))
1776 goto bail;
1777
1778 bail:
1779 TAILQ_REMOVE(&crp_delfree_q, crp, crp_next);
1780 kmem_free(crp->uio.uio_iov[0].iov_base,
1781 crp->uio.uio_iov[0].iov_len);
1782 crypto_freereq(crp);
1783 req++;
1784 }
1785
1786 if (req < count) {
1787 krp = TAILQ_FIRST(&krp_delfree_q);
1788 if (krp) {
1789 crypt_res[req].reqid = krp->krp_reqid;
1790 crypt_res[req].opaque = krp->krp_usropaque;
1791 completed++;
1792 if (krp->krp_status != 0) {
1793 DPRINTF(("cryptodev_key: "
1794 "krp->krp_status 0x%08x\n",
1795 krp->krp_status));
1796 crypt_res[req].status = krp->krp_status;
1797 goto fail;
1798 }
1799
1800 for (i = krp->krp_iparams; i < krp->krp_iparams
1801 + krp->krp_oparams; i++) {
1802 size = (krp->krp_param[i].crp_nbits
1803 + 7) / 8;
1804 if (size == 0)
1805 continue;
1806 crypt_res[req].status = copyout
1807 (krp->krp_param[i].crp_p,
1808 krp->crk_param[i].crp_p, size);
1809 if (crypt_res[req].status) {
1810 DPRINTF(("cryptodev_key: "
1811 "copyout oparam %d failed, "
1812 "error=%d\n",
1813 i - krp->krp_iparams,
1814 crypt_res[req].status));
1815 goto fail;
1816 }
1817 }
1818 fail:
1819 TAILQ_REMOVE(&krp_delfree_q, krp, krp_next);
1820 /* not sure what to do for this */
1821 /* kop[req].crk_status = krp->krp_status; */
1822 for (i = 0; i < CRK_MAXPARAM; i++) {
1823 struct crparam *kp = &(krp->krp_param[i]);
1824 if (kp->crp_p) {
1825 size = (kp->crp_nbits + 7) / 8;
1826 KASSERT(size > 0);
1827 (void)memset(kp->crp_p, 0, size);
1828 kmem_free(kp->crp_p, size);
1829 }
1830 }
1831 cv_destroy(&krp->krp_cv);
1832 pool_put(&cryptkop_pool, krp);
1833 req++;
1834 }
1835 }
1836 }
1837
1838 return completed;
1839 }
1840
1841 static int
1842 cryptodev_getstatus (struct fcrypt *fcr, struct crypt_result *crypt_res)
1843 {
1844 struct cryptop *crp = NULL, *cnext;
1845 struct cryptkop *krp = NULL, *knext;
1846 struct csession *cse;
1847 int i, size, req = 0;
1848
1849 mutex_spin_enter(&crypto_mtx);
1850 /* Here we dont know for which request the user is requesting the
1851 * response so checking in both the queues */
1852 TAILQ_FOREACH_SAFE(crp, &fcr->crp_ret_mq, crp_next, cnext) {
1853 if(crp && (crp->crp_reqid == crypt_res->reqid)) {
1854 cse = (struct csession *)crp->crp_opaque;
1855 crypt_res->opaque = crp->crp_usropaque;
1856 cse = csefind(fcr, cse->ses);
1857 if (cse == NULL) {
1858 DPRINTF(("csefind failed\n"));
1859 crypt_res->status = EINVAL;
1860 goto bail;
1861 }
1862
1863 if (crp->crp_etype != 0) {
1864 crypt_res->status = crp->crp_etype;
1865 goto bail;
1866 }
1867
1868 if (cse->error) {
1869 crypt_res->status = cse->error;
1870 goto bail;
1871 }
1872
1873 if (crp->dst && (crypt_res->status =
1874 copyout(crp->uio.uio_iov[0].iov_base,
1875 crp->dst, crp->len)))
1876 goto bail;
1877
1878 if (crp->mac && (crypt_res->status =
1879 copyout(crp->crp_mac, crp->mac,
1880 cse->thash->authsize)))
1881 goto bail;
1882 bail:
1883 TAILQ_REMOVE(&fcr->crp_ret_mq, crp, crp_next);
1884
1885 mutex_spin_exit(&crypto_mtx);
1886 crypto_freereq(crp);
1887 return 0;
1888 }
1889 }
1890
1891 TAILQ_FOREACH_SAFE(krp, &fcr->crp_ret_mkq, krp_next, knext) {
1892 if(krp && (krp->krp_reqid == crypt_res->reqid)) {
1893 crypt_res[req].opaque = krp->krp_usropaque;
1894 if (krp->krp_status != 0) {
1895 DPRINTF(("cryptodev_key: "
1896 "krp->krp_status 0x%08x\n",
1897 krp->krp_status));
1898 crypt_res[req].status = krp->krp_status;
1899 goto fail;
1900 }
1901
1902 for (i = krp->krp_iparams; i < krp->krp_iparams +
1903 krp->krp_oparams; i++) {
1904 size = (krp->krp_param[i].crp_nbits + 7) / 8;
1905 if (size == 0)
1906 continue;
1907 crypt_res[req].status = copyout(
1908 krp->krp_param[i].crp_p,
1909 krp->crk_param[i].crp_p, size);
1910 if (crypt_res[req].status) {
1911 DPRINTF(("cryptodev_key: copyout oparam"
1912 "%d failed, error=%d\n",
1913 i - krp->krp_iparams,
1914 crypt_res[req].status));
1915 goto fail;
1916 }
1917 }
1918 fail:
1919 TAILQ_REMOVE(&fcr->crp_ret_mkq, krp, krp_next);
1920 mutex_spin_exit(&crypto_mtx);
1921 /* not sure what to do for this */
1922 /* kop[req].crk_status = krp->krp_status; */
1923 for (i = 0; i < CRK_MAXPARAM; i++) {
1924 struct crparam *kp = &(krp->krp_param[i]);
1925 if (kp->crp_p) {
1926 size = (kp->crp_nbits + 7) / 8;
1927 KASSERT(size > 0);
1928 memset(kp->crp_p, 0, size);
1929 kmem_free(kp->crp_p, size);
1930 }
1931 }
1932 cv_destroy(&krp->krp_cv);
1933 pool_put(&cryptkop_pool, krp);
1934 return 0;
1935 }
1936 }
1937 mutex_spin_exit(&crypto_mtx);
1938 return EINPROGRESS;
1939 }
1940
1941 static int
1942 cryptof_poll(struct file *fp, int events)
1943 {
1944 struct fcrypt *fcr = (struct fcrypt *)fp->f_data;
1945 int revents = 0;
1946
1947 if (!(events & (POLLIN | POLLRDNORM))) {
1948 /* only support read and POLLIN */
1949 return 0;
1950 }
1951
1952 mutex_spin_enter(&crypto_mtx);
1953 if (TAILQ_EMPTY(&fcr->crp_ret_mq) && TAILQ_EMPTY(&fcr->crp_ret_mkq)) {
1954 /* no completed requests pending, save the poll for later */
1955 selrecord(curlwp, &fcr->sinfo);
1956 } else {
1957 /* let the app(s) know that there are completed requests */
1958 revents = events & (POLLIN | POLLRDNORM);
1959 }
1960 mutex_spin_exit(&crypto_mtx);
1961
1962 return revents;
1963 }
1964
1965 /*
1966 * Pseudo-device initialization routine for /dev/crypto
1967 */
1968 void cryptoattach(int);
1969
1970 void
1971 cryptoattach(int num)
1972 {
1973 pool_init(&fcrpl, sizeof(struct fcrypt), 0, 0, 0, "fcrpl",
1974 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */
1975 pool_init(&csepl, sizeof(struct csession), 0, 0, 0, "csepl",
1976 NULL, IPL_NET); /* XXX IPL_NET ("splcrypto") */
1977
1978 /*
1979 * Preallocate space for 64 users, with 5 sessions each.
1980 * (consider that a TLS protocol session requires at least
1981 * 3DES, MD5, and SHA1 (both hashes are used in the PRF) for
1982 * the negotiation, plus HMAC_SHA1 for the actual SSL records,
1983 * consuming one session here for each algorithm.
1984 */
1985 pool_prime(&fcrpl, 64);
1986 pool_prime(&csepl, 64 * 5);
1987 }
Cache object: 36ff2af5e7d1b8aac92a05ccce2b63f9
|