1 /* $OpenBSD: cryptosoft.c,v 1.35 2002/04/26 08:43:50 deraadt Exp $ */
2
3 /*-
4 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
5 *
6 * This code was written by Angelos D. Keromytis in Athens, Greece, in
7 * February 2000. Network Security Technologies Inc. (NSTI) kindly
8 * supported the development of this code.
9 *
10 * Copyright (c) 2000, 2001 Angelos D. Keromytis
11 *
12 * Permission to use, copy, and modify this software with or without fee
13 * is hereby granted, provided that this entire notice is included in
14 * all source code copies of any software which is or includes a copy or
15 * modification of this software.
16 *
17 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
18 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
19 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
20 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
21 * PURPOSE.
22 */
23
24 #include <sys/cdefs.h>
25 __FBSDID("$FreeBSD$");
26
27 #include <sys/param.h>
28 #include <sys/systm.h>
29 #include <sys/malloc.h>
30 #include <sys/mbuf.h>
31 #include <sys/sysctl.h>
32 #include <sys/errno.h>
33 #include <sys/random.h>
34 #include <sys/kernel.h>
35 #include <sys/uio.h>
36
37 #include <crypto/blowfish/blowfish.h>
38 #include <crypto/sha1.h>
39 #include <opencrypto/rmd160.h>
40 #include <opencrypto/cast.h>
41 #include <opencrypto/skipjack.h>
42 #include <sys/md5.h>
43
44 #include <opencrypto/cryptodev.h>
45 #include <opencrypto/cryptosoft.h>
46 #include <opencrypto/xform.h>
47
/*
 * Shared HMAC padding buffers, allocated and filled with HMAC_IPAD_VAL /
 * HMAC_OPAD_VAL by swcr_init().  swcr_authprepare() uses them to pad a
 * key out to the hash's block size without per-call allocation.
 */
u_int8_t *hmac_ipad_buffer;
u_int8_t *hmac_opad_buffer;

/*
 * Session table: swcr_sessions[sid] heads a linked list of per-algorithm
 * swcr_data records for that session.  Slot 0 is never used; the table is
 * grown (doubled) on demand by swcr_newsession().
 */
struct swcr_data **swcr_sessions = NULL;
u_int32_t swcr_sesnum = 0;	/* number of slots in swcr_sessions[] */
int32_t swcr_id = -1;		/* driver id from crypto_get_driverid() */

static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static int swcr_process(void *, struct cryptop *, int);
static int swcr_newsession(void *, u_int32_t *, struct cryptoini *);
static int swcr_freesession(void *, u_int64_t);
61
62 /*
63 * Apply a symmetric encryption/decryption algorithm.
64 */
/*
 * Apply a symmetric encryption/decryption algorithm.
 *
 * CBC mode is implemented by hand on top of the transform's single-block
 * encrypt/decrypt callbacks.  Three buffer layouts are handled: mbuf
 * chains (CRYPTO_F_IMBUF), iovec chains (CRYPTO_F_IOV), and a contiguous
 * buffer.  crd->crd_len must be a multiple of the cipher block size.
 *
 * Returns 0 on success or EINVAL on malformed input (unaligned length,
 * or a chain shorter than crd_skip + crd_len claims).
 */
static int
swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN], *idat;
	unsigned char *ivp, piv[EALG_MAX_BLOCK_LEN];
	struct enc_xform *exf;
	int i, k, j, blks;

	exf = sw->sw_exf;
	blks = exf->blocksize;

	/* Check for non-padded data */
	if (crd->crd_len % blks)
		return EINVAL;

	/* Initialize the IV */
	if (crd->crd_flags & CRD_F_ENCRYPT) {
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else
			arc4rand(iv, blks, 0);

		/* Do we need to write the IV */
		if (!(crd->crd_flags & CRD_F_IV_PRESENT))
			crypto_copyback(flags, buf, crd->crd_inject, blks, iv);

	} else {	/* Decryption */
		/* IV explicitly provided ? */
		if (crd->crd_flags & CRD_F_IV_EXPLICIT)
			bcopy(crd->crd_iv, iv, blks);
		else {
			/* Get IV off buf */
			crypto_copydata(flags, buf, crd->crd_inject, blks, iv);
		}
	}

	/* Per-operation key: replace any existing schedule before use. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
		int error;

		if (sw->sw_kschedule)
			exf->zerokey(&(sw->sw_kschedule));
		error = exf->setkey(&sw->sw_kschedule,
		    crd->crd_key, crd->crd_klen / 8);
		if (error)
			return (error);
	}
	ivp = iv;

	if (flags & CRYPTO_F_IMBUF) {
		struct mbuf *m = (struct mbuf *) buf;

		/* Find beginning of data */
		m = m_getptr(m, crd->crd_skip, &k);
		if (m == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an mbuf, we have to do some copying.
			 */
			if (m->m_len < k + blks && m->m_len != k) {
				/* Assemble a block that straddles mbufs. */
				m_copydata(m, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block.  piv staging is
					 * needed because ivp may alias iv,
					 * which we must read before writing.
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				m_copyback(m, k, blks, blk);

				/* Advance pointer */
				m = m_getptr(m, k + blks, &k);
				if (m == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/* Skip possibly empty mbufs */
			if (k == m->m_len) {
				for (m = m->m_next; m && m->m_len == 0;
				    m = m->m_next)
					;
				k = 0;
			}

			/* Sanity check */
			if (m == NULL)
				return EINVAL;

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = mtod(m, unsigned char *) + k;

			/* Fast path: process whole blocks in place. */
			while (m->m_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with mbuf encryption/decryption */
	} else if (flags & CRYPTO_F_IOV) {
		struct uio *uio = (struct uio *) buf;
		struct iovec *iov;

		/* Find beginning of data */
		iov = cuio_getptr(uio, crd->crd_skip, &k);
		if (iov == NULL)
			return EINVAL;

		i = crd->crd_len;

		while (i > 0) {
			/*
			 * If there's insufficient data at the end of
			 * an iovec, we have to do some copying.
			 */
			if (iov->iov_len < k + blks && iov->iov_len != k) {
				/* Assemble a block that straddles iovecs. */
				cuio_copydata(uio, k, blks, blk);

				/* Actual encryption/decryption */
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, blk);

					/*
					 * Keep encrypted block for XOR'ing
					 * with next block
					 */
					bcopy(blk, iv, blks);
					ivp = iv;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block for XOR'ing
					 * with next block (staged via piv
					 * when ivp still points into iv).
					 */
					if (ivp == iv)
						bcopy(blk, piv, blks);
					else
						bcopy(blk, iv, blks);

					exf->decrypt(sw->sw_kschedule, blk);

					/* XOR with previous block */
					for (j = 0; j < blks; j++)
						blk[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				/* Copy back decrypted block */
				cuio_copyback(uio, k, blks, blk);

				/* Advance pointer */
				iov = cuio_getptr(uio, k + blks, &k);
				if (iov == NULL)
					return EINVAL;

				i -= blks;

				/* Could be done... */
				if (i == 0)
					break;
			}

			/*
			 * Warning: idat may point to garbage here, but
			 * we only use it in the while() loop, only if
			 * there are indeed enough data.
			 */
			idat = (char *)iov->iov_base + k;

			/* Fast path: process whole blocks in place. */
			while (iov->iov_len >= k + blks && i > 0) {
				if (crd->crd_flags & CRD_F_ENCRYPT) {
					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					exf->encrypt(sw->sw_kschedule, idat);
					ivp = idat;
				} else {	/* decrypt */
					/*
					 * Keep encrypted block to be used
					 * in next block's processing.
					 */
					if (ivp == iv)
						bcopy(idat, piv, blks);
					else
						bcopy(idat, iv, blks);

					exf->decrypt(sw->sw_kschedule, idat);

					/* XOR with previous block/IV */
					for (j = 0; j < blks; j++)
						idat[j] ^= ivp[j];

					if (ivp == iv)
						bcopy(piv, iv, blks);
					else
						ivp = iv;
				}

				idat += blks;
				k += blks;
				i -= blks;
			}
		}

		return 0; /* Done with iovec encryption/decryption */
	} else {	/* contiguous buffer */
		if (crd->crd_flags & CRD_F_ENCRYPT) {
			for (i = crd->crd_skip;
			    i < crd->crd_skip + crd->crd_len; i += blks) {
				/* XOR with the IV/previous block, as appropriate. */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
				exf->encrypt(sw->sw_kschedule, buf + i);
			}
		} else {	/* Decrypt */
			/*
			 * Start at the end, so we don't need to keep the encrypted
			 * block as the IV for the next block.
			 */
			for (i = crd->crd_skip + crd->crd_len - blks;
			    i >= crd->crd_skip; i -= blks) {
				exf->decrypt(sw->sw_kschedule, buf + i);

				/* XOR with the IV/previous block, as appropriate */
				if (i == crd->crd_skip)
					for (k = 0; k < blks; k++)
						buf[i + k] ^= ivp[k];
				else
					for (k = 0; k < blks; k++)
						buf[i + k] ^= buf[i + k - blks];
			}
		}

		return 0; /* Done with contiguous buffer encryption/decryption */
	}

	/* Unreachable */
	return EINVAL;
}
392
/*
 * Prepare the per-session authentication contexts for the given key.
 *
 * For HMAC algorithms, precompute the inner (sw_ictx) and outer (sw_octx)
 * contexts from key XOR ipad / key XOR opad, padded out to the hash block
 * size using the shared hmac_{i,o}pad_buffer.  The key buffer is XOR'ed
 * in place and restored to its original bytes before returning.
 *
 * For KPDK algorithms, hash the key into sw_ictx and stash a copy of the
 * key in sw_octx so swcr_authcompute() can append it after the payload.
 *
 * klen is in bits on entry.
 *
 * NOTE(review): keys longer than axf->blocksize make
 * "axf->blocksize - klen" negative, which is passed to Update() as an
 * unsigned length — callers appear to never pass such keys, but this is
 * not validated here; confirm callers enforce klen <= blocksize * 8.
 */
static void
swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
    int klen)
{
	int k;

	klen /= 8;	/* bits -> bytes */

	switch (axf->type) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_NULL_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		/* Inner context: H(key ^ ipad, padded to blocksize). */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_IPAD_VAL;

		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);

		/* Flip the pad in place: ipad ^ (ipad ^ opad) == opad. */
		for (k = 0; k < klen; k++)
			key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

		/* Outer context: H(key ^ opad, padded to blocksize). */
		axf->Init(sw->sw_octx);
		axf->Update(sw->sw_octx, key, klen);
		axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);

		/* Restore the caller's key bytes. */
		for (k = 0; k < klen; k++)
			key[k] ^= HMAC_OPAD_VAL;
		break;
	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
	{
		/*
		 * We need a buffer that can hold an md5 and a sha1 result
		 * just to throw it away.
		 * What we do here is the initial part of:
		 *   ALGO( key, keyfill, .. )
		 * adding the key to sw_ictx and abusing Final() to get the
		 * "keyfill" padding.
		 * In addition we abuse the sw_octx to save the key to have
		 * it to be able to append it at the end in swcr_authcompute().
		 */
		u_char buf[SHA1_RESULTLEN];

		sw->sw_klen = klen;
		bcopy(key, sw->sw_octx, klen);
		axf->Init(sw->sw_ictx);
		axf->Update(sw->sw_ictx, key, klen);
		axf->Final(buf, sw->sw_ictx);
		break;
	}
	default:
		printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
		    "doesn't use keys.\n", __func__, axf->type);
	}
}
453
454 /*
455 * Compute keyed-hash authenticator.
456 */
/*
 * Compute keyed-hash authenticator.
 *
 * Runs the session's precomputed inner context over the payload, then
 * finishes according to the algorithm family (HMAC outer pass, KPDK
 * trailing-key append, or plain Final for NULL_HMAC), and writes the
 * result (truncated to sw_mlen if nonzero) at crd->crd_inject.
 *
 * Returns 0 on success, EINVAL if the session lacks the required
 * context(s), or an error propagated from crypto_apply().
 */
static int
swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
    int flags)
{
	unsigned char aalg[HASH_MAX_LEN];
	struct auth_hash *axf;
	union authctx ctx;
	int err;

	if (sw->sw_ictx == 0)
		return EINVAL;

	axf = sw->sw_axf;

	/* Per-operation key: rebuild the precomputed contexts first. */
	if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
		swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);

	/* Work on a copy so the session's inner context stays reusable. */
	bcopy(sw->sw_ictx, &ctx, axf->ctxsize);

	err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
	    (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
	if (err)
		return err;

	switch (sw->sw_alg) {
	case CRYPTO_MD5_HMAC:
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
	case CRYPTO_RIPEMD160_HMAC:
		if (sw->sw_octx == NULL)
			return EINVAL;

		/* HMAC: outer hash over the inner digest. */
		axf->Final(aalg, &ctx);
		bcopy(sw->sw_octx, &ctx, axf->ctxsize);
		axf->Update(&ctx, aalg, axf->hashsize);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_MD5_KPDK:
	case CRYPTO_SHA1_KPDK:
		/* If we have no key saved, return error. */
		if (sw->sw_octx == NULL)
			return EINVAL;

		/*
		 * Add the trailing copy of the key (see comment in
		 * swcr_authprepare()) after the data:
		 *   ALGO( .., key, algofill )
		 * and let Final() do the proper, natural "algofill"
		 * padding.
		 */
		axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
		axf->Final(aalg, &ctx);
		break;

	case CRYPTO_NULL_HMAC:
		axf->Final(aalg, &ctx);
		break;
	}

	/* Inject the authentication data */
	crypto_copyback(flags, buf, crd->crd_inject,
	    sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
	return 0;
}
524
525 /*
526 * Apply a compression/decompression algorithm
527 */
528 static int
529 swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
530 caddr_t buf, int flags)
531 {
532 u_int8_t *data, *out;
533 struct comp_algo *cxf;
534 int adj;
535 u_int32_t result;
536
537 cxf = sw->sw_cxf;
538
539 /* We must handle the whole buffer of data in one time
540 * then if there is not all the data in the mbuf, we must
541 * copy in a buffer.
542 */
543
544 MALLOC(data, u_int8_t *, crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
545 if (data == NULL)
546 return (EINVAL);
547 crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
548
549 if (crd->crd_flags & CRD_F_COMP)
550 result = cxf->compress(data, crd->crd_len, &out);
551 else
552 result = cxf->decompress(data, crd->crd_len, &out);
553
554 FREE(data, M_CRYPTO_DATA);
555 if (result == 0)
556 return EINVAL;
557
558 /* Copy back the (de)compressed data. m_copyback is
559 * extending the mbuf as necessary.
560 */
561 sw->sw_size = result;
562 /* Check the compressed size when doing compression */
563 if (crd->crd_flags & CRD_F_COMP) {
564 if (result >= crd->crd_len) {
565 /* Compression was useless, we lost time */
566 FREE(out, M_CRYPTO_DATA);
567 return 0;
568 }
569 }
570
571 crypto_copyback(flags, buf, crd->crd_skip, result, out);
572 if (result < crd->crd_len) {
573 adj = result - crd->crd_len;
574 if (flags & CRYPTO_F_IMBUF) {
575 adj = result - crd->crd_len;
576 m_adj((struct mbuf *)buf, adj);
577 } else if (flags & CRYPTO_F_IOV) {
578 struct uio *uio = (struct uio *)buf;
579 int ind;
580
581 adj = crd->crd_len - result;
582 ind = uio->uio_iovcnt - 1;
583
584 while (adj > 0 && ind >= 0) {
585 if (adj < uio->uio_iov[ind].iov_len) {
586 uio->uio_iov[ind].iov_len -= adj;
587 break;
588 }
589
590 adj -= uio->uio_iov[ind].iov_len;
591 uio->uio_iov[ind].iov_len = 0;
592 ind--;
593 uio->uio_iovcnt--;
594 }
595 }
596 }
597 FREE(out, M_CRYPTO_DATA);
598 return 0;
599 }
600
601 /*
602 * Generate a new software session.
603 */
/*
 * Generate a new software session.
 *
 * Finds a free slot in swcr_sessions[] (growing the table by doubling when
 * full; slot 0 is reserved), then walks the cryptoini chain allocating one
 * swcr_data per requested algorithm, linked through sw_next.  On any
 * failure the partially-built session is torn down via swcr_freesession().
 *
 * Returns 0 and stores the session id in *sid, or EINVAL/ENOBUFS.
 */
static int
swcr_newsession(void *arg, u_int32_t *sid, struct cryptoini *cri)
{
	struct swcr_data **swd;
	struct auth_hash *axf;
	struct enc_xform *txf;
	struct comp_algo *cxf;
	u_int32_t i;
	int error;

	if (sid == NULL || cri == NULL)
		return EINVAL;

	/* Look for an empty slot (0 is reserved). */
	if (swcr_sessions) {
		for (i = 1; i < swcr_sesnum; i++)
			if (swcr_sessions[i] == NULL)
				break;
	} else
		i = 1;		/* NB: to silence compiler warning */

	/* No free slot: allocate or double the table. */
	if (swcr_sessions == NULL || i == swcr_sesnum) {
		if (swcr_sessions == NULL) {
			i = 1; /* We leave swcr_sessions[0] empty */
			swcr_sesnum = CRYPTO_SW_SESSIONS;
		} else
			swcr_sesnum *= 2;

		swd = malloc(swcr_sesnum * sizeof(struct swcr_data *),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (swd == NULL) {
			/* Reset session number */
			if (swcr_sesnum == CRYPTO_SW_SESSIONS)
				swcr_sesnum = 0;
			else
				swcr_sesnum /= 2;
			return ENOBUFS;
		}

		/* Copy existing sessions */
		if (swcr_sessions) {
			bcopy(swcr_sessions, swd,
			    (swcr_sesnum / 2) * sizeof(struct swcr_data *));
			free(swcr_sessions, M_CRYPTO_DATA);
		}

		swcr_sessions = swd;
	}

	swd = &swcr_sessions[i];
	*sid = i;

	/* One swcr_data per algorithm in the cryptoini chain. */
	while (cri) {
		MALLOC(*swd, struct swcr_data *, sizeof(struct swcr_data),
		    M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
		if (*swd == NULL) {
			swcr_freesession(NULL, i);
			return ENOBUFS;
		}

		switch (cri->cri_alg) {
		case CRYPTO_DES_CBC:
			txf = &enc_xform_des;
			goto enccommon;
		case CRYPTO_3DES_CBC:
			txf = &enc_xform_3des;
			goto enccommon;
		case CRYPTO_BLF_CBC:
			txf = &enc_xform_blf;
			goto enccommon;
		case CRYPTO_CAST_CBC:
			txf = &enc_xform_cast5;
			goto enccommon;
		case CRYPTO_SKIPJACK_CBC:
			txf = &enc_xform_skipjack;
			goto enccommon;
		case CRYPTO_RIJNDAEL128_CBC:
			txf = &enc_xform_rijndael128;
			goto enccommon;
		case CRYPTO_CAMELLIA_CBC:
			txf = &enc_xform_camellia;
			goto enccommon;
		case CRYPTO_NULL_CBC:
			txf = &enc_xform_null;
			goto enccommon;
		enccommon:
			/* Key may also arrive later via CRD_F_KEY_EXPLICIT. */
			if (cri->cri_key != NULL) {
				error = txf->setkey(&((*swd)->sw_kschedule),
				    cri->cri_key, cri->cri_klen / 8);
				if (error) {
					swcr_freesession(NULL, i);
					return error;
				}
			}
			(*swd)->sw_exf = txf;
			break;

		case CRYPTO_MD5_HMAC:
			axf = &auth_hash_hmac_md5;
			goto authcommon;
		case CRYPTO_SHA1_HMAC:
			axf = &auth_hash_hmac_sha1;
			goto authcommon;
		case CRYPTO_SHA2_256_HMAC:
			axf = &auth_hash_hmac_sha2_256;
			goto authcommon;
		case CRYPTO_SHA2_384_HMAC:
			axf = &auth_hash_hmac_sha2_384;
			goto authcommon;
		case CRYPTO_SHA2_512_HMAC:
			axf = &auth_hash_hmac_sha2_512;
			goto authcommon;
		case CRYPTO_NULL_HMAC:
			axf = &auth_hash_null;
			goto authcommon;
		case CRYPTO_RIPEMD160_HMAC:
			axf = &auth_hash_hmac_ripemd_160;
		authcommon:
			/* HMAC: inner and outer precomputed contexts. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;

		case CRYPTO_MD5_KPDK:
			axf = &auth_hash_key_md5;
			goto auth2common;

		case CRYPTO_SHA1_KPDK:
			axf = &auth_hash_key_sha1;
		auth2common:
			/* KPDK: sw_octx holds a raw key copy, not a context. */
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			(*swd)->sw_octx = malloc(cri->cri_klen / 8,
			    M_CRYPTO_DATA, M_NOWAIT);
			if ((*swd)->sw_octx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			/* Store the key so we can "append" it to the payload */
			if (cri->cri_key != NULL) {
				swcr_authprepare(axf, *swd, cri->cri_key,
				    cri->cri_klen);
			}

			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#ifdef notdef
		case CRYPTO_MD5:
			axf = &auth_hash_md5;
			goto auth3common;

		case CRYPTO_SHA1:
			axf = &auth_hash_sha1;
		auth3common:
			(*swd)->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
			    M_NOWAIT);
			if ((*swd)->sw_ictx == NULL) {
				swcr_freesession(NULL, i);
				return ENOBUFS;
			}

			axf->Init((*swd)->sw_ictx);
			(*swd)->sw_mlen = cri->cri_mlen;
			(*swd)->sw_axf = axf;
			break;
#endif
		case CRYPTO_DEFLATE_COMP:
			cxf = &comp_algo_deflate;
			(*swd)->sw_cxf = cxf;
			break;
		default:
			swcr_freesession(NULL, i);
			return EINVAL;
		}

		(*swd)->sw_alg = cri->cri_alg;
		cri = cri->cri_next;
		swd = &((*swd)->sw_next);
	}
	return 0;
}
809
810 /*
811 * Free a session.
812 */
813 static int
814 swcr_freesession(void *arg, u_int64_t tid)
815 {
816 struct swcr_data *swd;
817 struct enc_xform *txf;
818 struct auth_hash *axf;
819 struct comp_algo *cxf;
820 u_int32_t sid = CRYPTO_SESID2LID(tid);
821
822 if (sid > swcr_sesnum || swcr_sessions == NULL ||
823 swcr_sessions[sid] == NULL)
824 return EINVAL;
825
826 /* Silently accept and return */
827 if (sid == 0)
828 return 0;
829
830 while ((swd = swcr_sessions[sid]) != NULL) {
831 swcr_sessions[sid] = swd->sw_next;
832
833 switch (swd->sw_alg) {
834 case CRYPTO_DES_CBC:
835 case CRYPTO_3DES_CBC:
836 case CRYPTO_BLF_CBC:
837 case CRYPTO_CAST_CBC:
838 case CRYPTO_SKIPJACK_CBC:
839 case CRYPTO_RIJNDAEL128_CBC:
840 case CRYPTO_CAMELLIA_CBC:
841 case CRYPTO_NULL_CBC:
842 txf = swd->sw_exf;
843
844 if (swd->sw_kschedule)
845 txf->zerokey(&(swd->sw_kschedule));
846 break;
847
848 case CRYPTO_MD5_HMAC:
849 case CRYPTO_SHA1_HMAC:
850 case CRYPTO_SHA2_256_HMAC:
851 case CRYPTO_SHA2_384_HMAC:
852 case CRYPTO_SHA2_512_HMAC:
853 case CRYPTO_RIPEMD160_HMAC:
854 case CRYPTO_NULL_HMAC:
855 axf = swd->sw_axf;
856
857 if (swd->sw_ictx) {
858 bzero(swd->sw_ictx, axf->ctxsize);
859 free(swd->sw_ictx, M_CRYPTO_DATA);
860 }
861 if (swd->sw_octx) {
862 bzero(swd->sw_octx, axf->ctxsize);
863 free(swd->sw_octx, M_CRYPTO_DATA);
864 }
865 break;
866
867 case CRYPTO_MD5_KPDK:
868 case CRYPTO_SHA1_KPDK:
869 axf = swd->sw_axf;
870
871 if (swd->sw_ictx) {
872 bzero(swd->sw_ictx, axf->ctxsize);
873 free(swd->sw_ictx, M_CRYPTO_DATA);
874 }
875 if (swd->sw_octx) {
876 bzero(swd->sw_octx, swd->sw_klen);
877 free(swd->sw_octx, M_CRYPTO_DATA);
878 }
879 break;
880
881 case CRYPTO_MD5:
882 case CRYPTO_SHA1:
883 axf = swd->sw_axf;
884
885 if (swd->sw_ictx)
886 free(swd->sw_ictx, M_CRYPTO_DATA);
887 break;
888
889 case CRYPTO_DEFLATE_COMP:
890 cxf = swd->sw_cxf;
891 break;
892 }
893
894 FREE(swd, M_CRYPTO_DATA);
895 }
896 return 0;
897 }
898
899 /*
900 * Process a software request.
901 */
/*
 * Process a software request.
 *
 * For each descriptor in the request, locates the matching swcr_data in
 * the session's per-algorithm chain and dispatches to the encrypt/auth/
 * compress worker.  The per-request error, if any, is stored in
 * crp->crp_etype and crypto_done() is always called before returning.
 *
 * Returns 0 except when crp itself is NULL (EINVAL).
 */
static int
swcr_process(void *arg, struct cryptop *crp, int hint)
{
	struct cryptodesc *crd;
	struct swcr_data *sw;
	u_int32_t lid;

	/* Sanity check */
	if (crp == NULL)
		return EINVAL;

	if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
		crp->crp_etype = EINVAL;
		goto done;
	}

	lid = crp->crp_sid & 0xffffffff;
	if (lid >= swcr_sesnum || lid == 0 || swcr_sessions[lid] == NULL) {
		crp->crp_etype = ENOENT;
		goto done;
	}

	/* Go through crypto descriptors, processing as we go */
	for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
		/*
		 * Find the crypto context.
		 *
		 * XXX Note that the logic here prevents us from having
		 * XXX the same algorithm multiple times in a session
		 * XXX (or rather, we can but it won't give us the right
		 * XXX results). To do that, we'd need some way of differentiating
		 * XXX between the various instances of an algorithm (so we can
		 * XXX locate the correct crypto context).
		 */
		for (sw = swcr_sessions[lid];
		    sw && sw->sw_alg != crd->crd_alg;
		    sw = sw->sw_next)
			;

		/* No such context ? */
		if (sw == NULL) {
			crp->crp_etype = EINVAL;
			goto done;
		}
		switch (sw->sw_alg) {
		case CRYPTO_DES_CBC:
		case CRYPTO_3DES_CBC:
		case CRYPTO_BLF_CBC:
		case CRYPTO_CAST_CBC:
		case CRYPTO_SKIPJACK_CBC:
		case CRYPTO_RIJNDAEL128_CBC:
		case CRYPTO_CAMELLIA_CBC:
			if ((crp->crp_etype = swcr_encdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;
		case CRYPTO_NULL_CBC:
			/* NULL cipher: nothing to transform. */
			crp->crp_etype = 0;
			break;
		case CRYPTO_MD5_HMAC:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512_HMAC:
		case CRYPTO_RIPEMD160_HMAC:
		case CRYPTO_NULL_HMAC:
		case CRYPTO_MD5_KPDK:
		case CRYPTO_SHA1_KPDK:
		case CRYPTO_MD5:
		case CRYPTO_SHA1:
			if ((crp->crp_etype = swcr_authcompute(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			break;

		case CRYPTO_DEFLATE_COMP:
			if ((crp->crp_etype = swcr_compdec(crd, sw,
			    crp->crp_buf, crp->crp_flags)) != 0)
				goto done;
			else
				/* Report the output size to the caller. */
				crp->crp_olen = (int)sw->sw_size;
			break;

		default:
			/* Unknown/unsupported algorithm */
			crp->crp_etype = EINVAL;
			goto done;
		}
	}

done:
	crypto_done(crp);
	return 0;
}
996
997 /*
998 * Initialize the driver, called from the kernel main().
999 */
/*
 * Initialize the driver, called from the kernel main().
 *
 * Allocates and fills the shared HMAC pad buffers, registers the software
 * driver with the crypto framework, and advertises every algorithm this
 * file implements.  Only the first crypto_register() passes the callback
 * pointers; the framework reuses them for subsequent registrations of the
 * same driver id.
 */
static void
swcr_init(void)
{
	u_int i;

	/* M_WAITOK: boot-time context, allocation cannot fail. */
	hmac_ipad_buffer = malloc(HMAC_MAX_BLOCK_LEN, M_CRYPTO_DATA, M_WAITOK);
	for (i = 0; i < HMAC_MAX_BLOCK_LEN; i++)
		hmac_ipad_buffer[i] = HMAC_IPAD_VAL;
	hmac_opad_buffer = malloc(HMAC_MAX_BLOCK_LEN, M_CRYPTO_DATA, M_WAITOK);
	for (i = 0; i < HMAC_MAX_BLOCK_LEN; i++)
		hmac_opad_buffer[i] = HMAC_OPAD_VAL;

	swcr_id = crypto_get_driverid(CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
	if (swcr_id < 0)
		panic("Software crypto device cannot initialize!");
	crypto_register(swcr_id, CRYPTO_DES_CBC,
	    0, 0, swcr_newsession, swcr_freesession, swcr_process, NULL);
#define	REGISTER(alg) \
	crypto_register(swcr_id, alg, 0,0,NULL,NULL,NULL,NULL)
	REGISTER(CRYPTO_3DES_CBC);
	REGISTER(CRYPTO_BLF_CBC);
	REGISTER(CRYPTO_CAST_CBC);
	REGISTER(CRYPTO_SKIPJACK_CBC);
	REGISTER(CRYPTO_NULL_CBC);
	REGISTER(CRYPTO_MD5_HMAC);
	REGISTER(CRYPTO_SHA1_HMAC);
	REGISTER(CRYPTO_SHA2_256_HMAC);
	REGISTER(CRYPTO_SHA2_384_HMAC);
	REGISTER(CRYPTO_SHA2_512_HMAC);
	REGISTER(CRYPTO_RIPEMD160_HMAC);
	REGISTER(CRYPTO_NULL_HMAC);
	REGISTER(CRYPTO_MD5_KPDK);
	REGISTER(CRYPTO_SHA1_KPDK);
	REGISTER(CRYPTO_MD5);
	REGISTER(CRYPTO_SHA1);
	REGISTER(CRYPTO_RIJNDAEL128_CBC);
	REGISTER(CRYPTO_CAMELLIA_CBC);
	REGISTER(CRYPTO_DEFLATE_COMP);
#undef REGISTER
}
1040 SYSINIT(cryptosoft_init, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_init, NULL)
1041
/*
 * Tear down driver state at module unload.
 *
 * Frees the session table and the shared HMAC pad buffers.
 *
 * NOTE(review): only the table itself is freed here — any live sessions'
 * swcr_data chains are not walked.  Presumably the framework guarantees
 * all sessions are freed before unload; confirm before relying on this.
 */
static void
swcr_uninit(void)
{

	if (swcr_sessions != NULL)
		FREE(swcr_sessions, M_CRYPTO_DATA);
	free(hmac_ipad_buffer, M_CRYPTO_DATA);
	free(hmac_opad_buffer, M_CRYPTO_DATA);
}
1051 SYSUNINIT(cryptosoft_uninit, SI_SUB_PSEUDO, SI_ORDER_ANY, swcr_uninit, NULL);
Cache object: 8b955f258b4e7ee9d0d49ae7a17b2ffe
|