/* $FreeBSD$ */
/* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000, 2001 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software with or without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/random.h>
#include <sys/kernel.h>
#include <sys/uio.h>

#include <crypto/blowfish/blowfish.h>
#include <crypto/cast128/cast128.h>
#include <crypto/sha1.h>
#include <opencrypto/rmd160.h>
#include <opencrypto/skipjack.h>
#include <sys/md5.h>

#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>

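/*
 * HMAC inner/outer pad constants from RFC 2104: the key is XORed with
 * 0x36 (ipad) for the inner hash and 0x5C (opad) for the outer hash,
 * i.e. HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m)).  The buffers
 * below are simply those byte values repeated out to the 64-byte block
 * size of the supported hash functions.
 */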
u_int8_t hmac_ipad_buffer[64] = {
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
	0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36
};

u_int8_t hmac_opad_buffer[64] = {
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C,
	0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C, 0x5C
};

struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;
int32_t swcr_id = -1;

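/*
 * Helper macros that dispatch a copy to or from the request's data buffer
 * according to its type: m_copyback()/m_copydata() for mbuf chains,
 * otherwise the cuio_*() equivalents for uio/iovec buffers.
 */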
#define COPYBACK(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copyback((struct mbuf *)a,b,c,d) \
	: cuio_copyback((struct uio *)a,b,c,d)
#define COPYDATA(x, a, b, c, d) \
	(x) == CRYPTO_BUF_MBUF ? m_copydata((struct mbuf *)a,b,c,d) \
	: cuio_copydata((struct uio *)a,b,c,d)

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);

/*
 * Apply a symmetric encryption/decryption algorithm.
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int outtype)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;
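	/*
	 * CBC state kept on the stack: "iv" holds the chaining value,
	 * "ivp" points at the block to XOR against next, and "piv" saves
	 * a copy of the current ciphertext block when decrypting in place
	 * (the plaintext overwrites it, but it is still needed as the IV
	 * for the following block).  "blk" is a bounce buffer for blocks
	 * that straddle mbuf or iovec boundaries, and "idat" walks data
	 * that is contiguous within a segment.
	 */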

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get random IV */
			for (i = 0;
			    i + sizeof (u_int32_t) <= EALG_MAX_BLOCK_LEN;
			    i += sizeof (u_int32_t)) {
				u_int32_t temp = arc4random();

				bcopy(&temp, iv + i, sizeof(u_int32_t));
			}
			/*
			 * If the block size is not a multiple of
			 * sizeof (u_int32_t) -- the size of an
			 * arc4random() result -- fill in the remaining
			 * bytes as well.
			 */
			if (EALG_MAX_BLOCK_LEN % sizeof (u_int32_t) != 0) {
				u_int32_t temp = arc4random();

				bcopy(&temp, iv + i,
				    EALG_MAX_BLOCK_LEN - i);
			}
		}

		/* Do we need to write the IV? */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT)) {
			COPYBACK(outtype, buf, crd->crd_inject, blks, iv);
		}

	} else {	/* Decryption */
		/* IV explicitly provided? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			COPYDATA(outtype, buf, crd->crd_inject, blks, iv);
		}
	}

	ivp = iv;

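	/*
	 * The encryption transforms operate on a single block at a time,
	 * so CBC chaining is done by hand below.  Three buffer layouts are
	 * handled: a contiguous buffer, an mbuf chain and a uio/iovec.
	 * For the latter two, blocks that span a segment boundary are
	 * bounced through "blk"; blocks that lie entirely within one
	 * segment are processed in place through "idat".
	 */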
	if (outtype == CRYPTO_BUF_CONTIG) {
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0;
	} else if (outtype == CRYPTO_BUF_MBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back the processed block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only if there is indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with mbuf encryption/decryption */
	} else if (outtype == CRYPTO_BUF_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back the processed block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop below, and
			 * only if there is indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0;	/* Done with iovec encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}

/*
 * Compute keyed-hash authenticator.
 */
static int
swcr_authcompute(struct cryptop *crp, struct cryptodesc *crd,
    struct swcr_data *sw, caddr_t buf, int outtype)
{
	unsigned char aalg[AALG_MAX_RESULT_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == NULL)
		return EINVAL;

	axf = sw->sw_axf;

	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

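	/*
	 * sw_ictx holds the hash state precomputed at session setup time
	 * (for HMAC, the state after absorbing the ipad-XORed key), so only
	 * the payload itself has to be hashed per request.  Note that only
	 * contiguous and mbuf buffers are supported here; iovec requests
	 * fall through to EINVAL below.
	 */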
	switch (outtype) {
	case CRYPTO_BUF_CONTIG:
		axf->Update(&ctx, buf + crd->crd_skip, crd->crd_len);
		break;
	case CRYPTO_BUF_MBUF:
		err = m_apply((struct mbuf *) buf, crd->crd_skip, crd->crd_len,
		    (int (*)(caddr_t, caddr_t, unsigned int)) axf->Update,
		    (caddr_t) &ctx);
		if (err)
			return err;
		break;
	case CRYPTO_BUF_IOV:
	default:
		return EINVAL;
	}

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		if (sw->sw_octx == NULL)
			return EINVAL;

		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	if (outtype == CRYPTO_BUF_CONTIG)
		bcopy(aalg, buf + crd->crd_inject, axf->authsize);
	else
		m_copyback((struct mbuf *) buf, crd->crd_inject,
		    axf->authsize, aalg);
	return 0;
}

/*
 * Apply a compression/decompression algorithm.
 */
static int
swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
    caddr_t buf, int outtype)
{
	u_int8_t *data, *out;
	struct comp_algo *cxf;
	int adj;
	u_int32_t result;

	cxf = sw->sw_cxf;

	/*
	 * The (de)compression routines operate on the whole buffer at once,
	 * so copy the data out of the mbuf chain or iovec into a contiguous
	 * scratch buffer first.
	 */

	MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
	if (data == NULL)
		return (EINVAL);
	COPYDATA(outtype, buf, crd->crd_skip, crd->crd_len, data);

	if (crd->crd_flags & CRD_F_COMP)
		result = cxf->compress(data, crd->crd_len, &out);
	else
		result = cxf->decompress(data, crd->crd_len, &out);

	FREE(data, M_CRYPTO_DATA);
	if (result == 0)
		return EINVAL;

	/*
	 * Copy back the (de)compressed data.  m_copyback() extends the
	 * mbuf as necessary.
	 */
	sw->sw_size = result;
	/* Check the compressed size when doing compression */
	if (crd->crd_flags & CRD_F_COMP) {
		if (result > crd->crd_len) {
			/* Compression gained nothing; leave the data as is */
			FREE(out, M_CRYPTO_DATA);
			return 0;
		}
	}

	COPYBACK(outtype, buf, crd->crd_skip, result, out);
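	/*
	 * If the result is shorter than the original data, trim the excess:
	 * m_adj() with a negative count trims from the tail of an mbuf
	 * chain; for a uio we walk the iovec array backwards, shrinking or
	 * dropping iovecs until the surplus is gone.
	 */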
	if (result < crd->crd_len) {
		adj = result - crd->crd_len;
		if (outtype == CRYPTO_BUF_MBUF) {
			m_adj((struct mbuf *)buf, adj);
		} else {
			struct uio *uio = (struct uio *)buf;
			int ind;

			adj = crd->crd_len - result;
			ind = uio->uio_iovcnt - 1;

			while (adj > 0 && ind >= 0) {
				if (adj < uio->uio_iov[ind].iov_len) {
					uio->uio_iov[ind].iov_len -= adj;
					break;
				}

				adj -= uio->uio_iov[ind].iov_len;
				uio->uio_iov[ind].iov_len = 0;
				ind--;
				uio->uio_iovcnt--;
			}
		}
	}
	FREE(out, M_CRYPTO_DATA);
	return 0;
}

/*
 * Generate a new software session.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int k, error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

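	/*
	 * No free slot was found (or no session table exists yet): allocate
	 * a table of CRYPTO_SW_SESSIONS entries, or double the existing one,
	 * and carry the old entries over.  Slot 0 is intentionally left
	 * unused so that a session id of 0 never refers to a live session.
	 */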
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1;		/* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	while (cri) {
		MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			error = txf->setkey(&((*swd)->sw_kschedule),
			    cri->cri_key, cri->cri_klen / 8);
			if (error) {
				swcr_freesession(NULL, i);
				return error;
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5_96;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1_96;
			goto authcommon;
		case CRYPTO_SHA2_HMAC:
			if (cri->cri_klen == 256)
				axf = &auth_hash_hmac_sha2_256;
			else if (cri->cri_klen == 384)
				axf = &auth_hash_hmac_sha2_384;
			else if (cri->cri_klen == 512)
				axf = &auth_hash_hmac_sha2_512;
			else {
				swcr_freesession(NULL, i);
				return EINVAL;
			}
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160_96;
		authcommon:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

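			/*
			 * Precompute the inner and outer HMAC states: XOR the
			 * key with the ipad value, absorb it (padded out to
			 * the HMAC block size), then flip the key to the opad
			 * value and do the same for the outer state.  The
			 * caller's key is restored before leaving this case.
			 * Keys are assumed to be no longer than
			 * HMAC_BLOCK_LEN bytes; longer keys are not hashed
			 * down as RFC 2104 would require.
			 */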
			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_IPAD_VAL;

			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_ictx, hmac_ipad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

			axf->Init((*swd)->sw_octx);
			axf->Update((*swd)->sw_octx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Update((*swd)->sw_octx, hmac_opad_buffer,
			    HMAC_BLOCK_LEN - (cri->cri_klen / 8));

			for (k = 0; k < cri->cri_klen / 8; k++)
				cri->cri_key[k] ^= HMAC_OPAD_VAL;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

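			/*
			 * KPDK ("key, pad, data, key") authentication, as
			 * used by the older keyed MD5/SHA-1 IPsec AH
			 * transforms: the hash state is primed with the key
			 * here, and the raw key is kept in sw_octx so it can
			 * be appended after the payload at request time.
			 */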
			/* Store the key so we can "append" it to the payload */
			(*swd)->sw_octx = malloc(cri->cri_klen / 8, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_klen = cri->cri_klen / 8;
			bcopy(cri->cri_key, (*swd)->sw_octx, cri->cri_klen / 8);
			axf->Init((*swd)->sw_ictx);
			axf->Update((*swd)->sw_ictx, cri->cri_key,
			    cri->cri_klen / 8);
			axf->Final(NULL, (*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}

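/*
 * Illustrative sketch (not part of this driver, and the exact framework
 * calls are assumptions based on the cryptodev.h interface of this
 * vintage): a kernel consumer reaches swcr_newsession() indirectly, by
 * chaining cryptoini structures and asking the opencrypto framework for a
 * session, roughly:
 *
 *	struct cryptoini crie, cria;
 *	u_int64_t sid;
 *	int error;
 *
 *	bzero(&crie, sizeof(crie));
 *	bzero(&cria, sizeof(cria));
 *	crie.cri_alg = CRYPTO_3DES_CBC;
 *	crie.cri_klen = 192;		(key length in bits)
 *	crie.cri_key = enckey;
 *	crie.cri_next = &cria;
 *	cria.cri_alg = CRYPTO_SHA1_HMAC;
 *	cria.cri_klen = 160;
 *	cria.cri_key = authkey;
 *	error = crypto_newsession(&sid, &crie, 0);
 *
 * "enckey" and "authkey" are placeholder key buffers; the last argument to
 * crypto_newsession() selects whether a hardware driver is required.
 */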
/*
 * Free a session.
 */
static int
swcr_freesession(void *arg, u_int64_t tid)
{
	struct swcr_data *swd;
	struct enc_xform *txf;
	struct auth_hash *axf;
	struct comp_algo *cxf;
	u_int32_t sid = CRYPTO_SESID2LID(tid);

	if (sid >= swcr_sesnum || swcr_sessions == NULL ||
	    swcr_sessions[sid] == NULL)
		return EINVAL;

	/* Silently accept and return */
	if (sid == 0)
		return 0;

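	/*
	 * A session is a linked list of per-algorithm contexts; tear each
	 * one down, zeroing key material and hash state before freeing it.
	 */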
	while ((swd = swcr_sessions[sid]) != NULL) {
		swcr_sessions[sid] = swd->sw_next;

		switch (swd->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_NULL_CBC:
			txf = swd->sw_exf;

			if (swd->sw_kschedule)
				txf->zerokey(&(swd->sw_kschedule));
			break;

		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, axf->ctxsize);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
			axf = swd->sw_axf;

			if (swd->sw_ictx) {
				bzero(swd->sw_ictx, axf->ctxsize);
				free(swd->sw_ictx, M_CRYPTO_DATA);
			}
			if (swd->sw_octx) {
				bzero(swd->sw_octx, swd->sw_klen);
				free(swd->sw_octx, M_CRYPTO_DATA);
			}
			break;

		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			axf = swd->sw_axf;

			if (swd->sw_ictx)
				free(swd->sw_ictx, M_CRYPTO_DATA);
			break;

		case CRYPTO_DEFLATE_COMP:
			cxf = swd->sw_cxf;
			break;
		}

		FREE(swd, M_CRYPTO_DATA);
	}
	return 0;
}

/*
 * Process a software request.
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;
	int type;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		type = CRYPTO_BUF_MBUF;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		type = CRYPTO_BUF_IOV;
	} else {
		type = CRYPTO_BUF_CONTIG;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crp, crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, type)) != 0)
				goto done;
			else
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}

/*
 * Initialize the driver, called during kernel startup via SYSINIT.
 */
static void
swcr_init(void)
{
	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0)
		panic("Software crypto device cannot initialize!");
	crypto_register(swcr_id, CRYPTO_DES_CBC,
	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
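	/*
	 * The session and processing entry points are bound to the driver
	 * by the first crypto_register() call above; the remaining
	 * algorithms only need to announce their availability, so they are
	 * registered with NULL callbacks.
	 */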
#define REGISTER(alg) \
	crypto_register(swcr_id, alg, 0, 0, NULL, NULL, NULL, NULL)
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER
}
SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)