1 /*
2 * vim:sw=4 ts=8
3 */
4 /*-
5 * SPDX-License-Identifier: BSD-4-Clause
6 *
7 * Copyright (c) 2009 David McCullough <david.mccullough@securecomputing.com>
8 *
9 * Copyright (c) 2003-2007 Cavium Networks (support@cavium.com). All rights
10 * reserved.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions are met:
14 * 1. Redistributions of source code must retain the above copyright notice,
15 * this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright notice,
17 * this list of conditions and the following disclaimer in the documentation
18 * and/or other materials provided with the distribution.
19 * 3. All advertising materials mentioning features or use of this software
20 * must display the following acknowledgement:
21 * This product includes software developed by Cavium Networks
22 * 4. Cavium Networks' name may not be used to endorse or promote products
23 * derived from this software without specific prior written permission.
24 *
25 * This Software, including technical data, may be subject to U.S. export
26 * control laws, including the U.S. Export Administration Act and its
27 * associated regulations, and may be subject to export or import regulations
28 * in other countries. You warrant that You will comply strictly in all
29 * respects with all such regulations and acknowledge that you have the
30 * responsibility to obtain licenses to export, re-export or import the
31 * Software.
32 *
33 * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS" AND
34 * WITH ALL FAULTS AND CAVIUM MAKES NO PROMISES, REPRESENTATIONS OR WARRANTIES,
35 * EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO THE
36 * SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
37 * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
38 * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
39 * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
40 * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
41 * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
42 * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
43 */
44 /****************************************************************************/
45
46 #include <sys/cdefs.h>
47 __FBSDID("$FreeBSD$");
48
49 #include <sys/param.h>
50 #include <sys/systm.h>
51 #include <sys/kernel.h>
52 #include <sys/module.h>
53 #include <sys/malloc.h>
54 #include <sys/uio.h>
55
56 #include <opencrypto/cryptodev.h>
57
58 #include <contrib/octeon-sdk/cvmx.h>
59
60 #include <mips/cavium/cryptocteon/cryptocteonvar.h>
61
62 /****************************************************************************/
63
/*
 * Point (ptr)/(len) at the start of the first segment of the iovec
 * chain and reset the running segment index to zero.
 */
#define IOV_INIT(iov, ptr, idx, len)					\
	do {							\
		(idx) = 0;					\
		(len) = (iov)[0].iov_len;			\
		(ptr) = (iov)[0].iov_base;			\
	} while (0)
70
71 /*
72 * XXX
73 * It would be better if this were an IOV_READ/IOV_WRITE macro instead so
74 * that we could detect overflow before it happens rather than right after,
75 * which is especially bad since there is usually no IOV_CONSUME after the
76 * final read or write.
77 */
/*
 * Advance (ptr) past one element of its own size within the iovec
 * chain, hopping to the next segment when the current one is exactly
 * used up.  Panics if the current segment cannot hold a full element.
 */
#define IOV_CONSUME(iov, ptr, idx, len)					\
	do {							\
		if ((len) < sizeof *(ptr))			\
			panic("%s: went past end of iovec.", __func__); \
		if ((len) == sizeof *(ptr)) {			\
			(idx)++;				\
			(ptr) = (iov)[(idx)].iov_base;		\
			(len) = (iov)[(idx)].iov_len;		\
		} else {					\
			(len) -= sizeof *(ptr);			\
			(ptr)++;				\
		}						\
	} while (0)
91
/* IPsec ESP geometry used by the Octeon crypto glue. */
#define ESP_HEADER_LENGTH	8	/* SPI + sequence number */
#define AES_CBC_IV_LENGTH	16	/* one AES block */
#define ESP_HMAC_LEN		12	/* 96-bit truncated HMAC (RFC 2404) */
97
98 /****************************************************************************/
99
/*
 * Feed one 64-bit word into the Octeon hash unit.  The unit consumes
 * eight words (one 64-byte block) per round; `next' tracks the current
 * word slot (0-7).  The eighth word is written via STARTSHA, which kicks
 * off the SHA-1 block round, after which the slot index wraps to zero.
 * The slot argument of CVMX_MT_HSH_DAT must be a compile-time constant,
 * hence the unrolled per-slot cases.  Wrapped in do/while(0) so the
 * macro is safe as the body of an unbraced if/else.
 */
#define CVM_LOAD_SHA_UNIT(dat, next)				\
	do {							\
		switch (next) {					\
		case 0:						\
			CVMX_MT_HSH_DAT(dat, 0);		\
			(next) = 1;				\
			break;					\
		case 1:						\
			CVMX_MT_HSH_DAT(dat, 1);		\
			(next) = 2;				\
			break;					\
		case 2:						\
			CVMX_MT_HSH_DAT(dat, 2);		\
			(next) = 3;				\
			break;					\
		case 3:						\
			CVMX_MT_HSH_DAT(dat, 3);		\
			(next) = 4;				\
			break;					\
		case 4:						\
			CVMX_MT_HSH_DAT(dat, 4);		\
			(next) = 5;				\
			break;					\
		case 5:						\
			CVMX_MT_HSH_DAT(dat, 5);		\
			(next) = 6;				\
			break;					\
		case 6:						\
			CVMX_MT_HSH_DAT(dat, 6);		\
			(next) = 7;				\
			break;					\
		default:					\
			CVMX_MT_HSH_STARTSHA(dat);		\
			(next) = 0;				\
			break;					\
		}						\
	} while (0)
127
/*
 * Feed two consecutive 64-bit words into the Octeon hash unit, keeping
 * `next' (the current word slot, 0-7) in step.  Whichever of the two
 * words lands on slot 7 is written via STARTSHA to trigger the SHA-1
 * block round.  Slot numbers must be compile-time constants, hence the
 * per-phase unrolling.  Wrapped in do/while(0) so the macro is safe as
 * the body of an unbraced if/else.
 */
#define CVM_LOAD2_SHA_UNIT(dat1, dat2, next)			\
	do {							\
		switch (next) {					\
		case 0:						\
			CVMX_MT_HSH_DAT(dat1, 0);		\
			CVMX_MT_HSH_DAT(dat2, 1);		\
			(next) = 2;				\
			break;					\
		case 1:						\
			CVMX_MT_HSH_DAT(dat1, 1);		\
			CVMX_MT_HSH_DAT(dat2, 2);		\
			(next) = 3;				\
			break;					\
		case 2:						\
			CVMX_MT_HSH_DAT(dat1, 2);		\
			CVMX_MT_HSH_DAT(dat2, 3);		\
			(next) = 4;				\
			break;					\
		case 3:						\
			CVMX_MT_HSH_DAT(dat1, 3);		\
			CVMX_MT_HSH_DAT(dat2, 4);		\
			(next) = 5;				\
			break;					\
		case 4:						\
			CVMX_MT_HSH_DAT(dat1, 4);		\
			CVMX_MT_HSH_DAT(dat2, 5);		\
			(next) = 6;				\
			break;					\
		case 5:						\
			CVMX_MT_HSH_DAT(dat1, 5);		\
			CVMX_MT_HSH_DAT(dat2, 6);		\
			(next) = 7;				\
			break;					\
		case 6:						\
			CVMX_MT_HSH_DAT(dat1, 6);		\
			CVMX_MT_HSH_STARTSHA(dat2);		\
			(next) = 0;				\
			break;					\
		default:					\
			CVMX_MT_HSH_STARTSHA(dat1);		\
			CVMX_MT_HSH_DAT(dat2, 0);		\
			(next) = 1;				\
			break;					\
		}						\
	} while (0)
163
164 /****************************************************************************/
165
/*
 * MD5 twin of CVM_LOAD_SHA_UNIT: feed one 64-bit word into the hash
 * unit, tracking the word slot (0-7) in `next'.  The eighth word is
 * written via STARTMD5 to trigger the MD5 block round, after which the
 * slot index wraps to zero.  Slot numbers must be compile-time
 * constants, hence the unrolling.  Wrapped in do/while(0) so the macro
 * is safe as the body of an unbraced if/else.
 */
#define CVM_LOAD_MD5_UNIT(dat, next)				\
	do {							\
		switch (next) {					\
		case 0:						\
			CVMX_MT_HSH_DAT(dat, 0);		\
			(next) = 1;				\
			break;					\
		case 1:						\
			CVMX_MT_HSH_DAT(dat, 1);		\
			(next) = 2;				\
			break;					\
		case 2:						\
			CVMX_MT_HSH_DAT(dat, 2);		\
			(next) = 3;				\
			break;					\
		case 3:						\
			CVMX_MT_HSH_DAT(dat, 3);		\
			(next) = 4;				\
			break;					\
		case 4:						\
			CVMX_MT_HSH_DAT(dat, 4);		\
			(next) = 5;				\
			break;					\
		case 5:						\
			CVMX_MT_HSH_DAT(dat, 5);		\
			(next) = 6;				\
			break;					\
		case 6:						\
			CVMX_MT_HSH_DAT(dat, 6);		\
			(next) = 7;				\
			break;					\
		default:					\
			CVMX_MT_HSH_STARTMD5(dat);		\
			(next) = 0;				\
			break;					\
		}						\
	} while (0)
193
/*
 * MD5 twin of CVM_LOAD2_SHA_UNIT: feed two consecutive 64-bit words
 * into the hash unit, keeping the word slot counter `next' (0-7) in
 * step.  Whichever word lands on slot 7 is written via STARTMD5 to
 * trigger the MD5 block round.  Slot numbers must be compile-time
 * constants, hence the per-phase unrolling.  Wrapped in do/while(0) so
 * the macro is safe as the body of an unbraced if/else.
 */
#define CVM_LOAD2_MD5_UNIT(dat1, dat2, next)			\
	do {							\
		switch (next) {					\
		case 0:						\
			CVMX_MT_HSH_DAT(dat1, 0);		\
			CVMX_MT_HSH_DAT(dat2, 1);		\
			(next) = 2;				\
			break;					\
		case 1:						\
			CVMX_MT_HSH_DAT(dat1, 1);		\
			CVMX_MT_HSH_DAT(dat2, 2);		\
			(next) = 3;				\
			break;					\
		case 2:						\
			CVMX_MT_HSH_DAT(dat1, 2);		\
			CVMX_MT_HSH_DAT(dat2, 3);		\
			(next) = 4;				\
			break;					\
		case 3:						\
			CVMX_MT_HSH_DAT(dat1, 3);		\
			CVMX_MT_HSH_DAT(dat2, 4);		\
			(next) = 5;				\
			break;					\
		case 4:						\
			CVMX_MT_HSH_DAT(dat1, 4);		\
			CVMX_MT_HSH_DAT(dat2, 5);		\
			(next) = 6;				\
			break;					\
		case 5:						\
			CVMX_MT_HSH_DAT(dat1, 5);		\
			CVMX_MT_HSH_DAT(dat2, 6);		\
			(next) = 7;				\
			break;					\
		case 6:						\
			CVMX_MT_HSH_DAT(dat1, 6);		\
			CVMX_MT_HSH_STARTMD5(dat2);		\
			(next) = 0;				\
			break;					\
		default:					\
			CVMX_MT_HSH_STARTMD5(dat1);		\
			CVMX_MT_HSH_DAT(dat2, 0);		\
			(next) = 1;				\
			break;					\
		}						\
	} while (0)
229
230 /****************************************************************************/
231
/*
 * Precompute the HMAC inner and outer hash states for a session key.
 *
 * The key (zero-padded to a 64-byte block) is XORed with the HMAC ipad
 * (0x36 bytes) and opad (0x5c bytes) patterns; each padded block is run
 * through the Octeon hash unit once, and the resulting intermediate
 * digests are read back into `inner' and `outer'.  Per-packet hashing
 * later resumes from these states instead of rehashing the key.
 *
 * auth != 0 selects SHA1 (20-byte key, 3 state words); auth == 0
 * selects MD5 (16-byte key, 2 state words).
 */
void
octo_calc_hash(uint8_t auth, unsigned char *key, uint64_t *inner, uint64_t *outer)
{
	uint8_t hash_key[64];
	uint64_t *key1;
	register uint64_t xor1 = 0x3636363636363636ULL;	/* HMAC ipad bytes */
	register uint64_t xor2 = 0x5c5c5c5c5c5c5c5cULL;	/* HMAC opad bytes */

	dprintf("%s()\n", __func__);

	/* Zero-pad the raw key out to one full 64-byte hash block. */
	memset(hash_key, 0, sizeof(hash_key));
	memcpy(hash_key, (uint8_t *) key, (auth ? 20 : 16));
	/*
	 * NOTE(review): aliasing cast reads the byte buffer as 64-bit
	 * words; assumes hash_key is 8-byte aligned on the stack.
	 */
	key1 = (uint64_t *) hash_key;
	/* Seed the hash unit with the standard SHA1/MD5 initial state. */
	if (auth) {
		CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0);
		CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1);
		CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2);
	} else {
		CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0);
		CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1);
	}

	/* Feed the ipad-XORed key block; the eighth word starts the round. */
	CVMX_MT_HSH_DAT((*key1 ^ xor1), 0);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor1), 1);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor1), 2);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor1), 3);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor1), 4);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor1), 5);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor1), 6);
	key1++;
	if (auth)
		CVMX_MT_HSH_STARTSHA((*key1 ^ xor1));
	else
		CVMX_MT_HSH_STARTMD5((*key1 ^ xor1));

	/* Read back the inner (ipad) intermediate state. */
	CVMX_MF_HSH_IV(inner[0], 0);
	CVMX_MF_HSH_IV(inner[1], 1);
	if (auth) {
		inner[2] = 0;
		CVMX_MF_HSH_IV(((uint64_t *) inner)[2], 2);
	}

	/* Repeat with the opad pattern for the outer state. */
	memset(hash_key, 0, sizeof(hash_key));
	memcpy(hash_key, (uint8_t *) key, (auth ? 20 : 16));
	key1 = (uint64_t *) hash_key;
	if (auth) {
		CVMX_MT_HSH_IV(0x67452301EFCDAB89ULL, 0);
		CVMX_MT_HSH_IV(0x98BADCFE10325476ULL, 1);
		CVMX_MT_HSH_IV(0xC3D2E1F000000000ULL, 2);
	} else {
		CVMX_MT_HSH_IV(0x0123456789ABCDEFULL, 0);
		CVMX_MT_HSH_IV(0xFEDCBA9876543210ULL, 1);
	}

	CVMX_MT_HSH_DAT((*key1 ^ xor2), 0);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor2), 1);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor2), 2);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor2), 3);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor2), 4);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor2), 5);
	key1++;
	CVMX_MT_HSH_DAT((*key1 ^ xor2), 6);
	key1++;
	if (auth)
		CVMX_MT_HSH_STARTSHA((*key1 ^ xor2));
	else
		CVMX_MT_HSH_STARTMD5((*key1 ^ xor2));

	/* Read back the outer (opad) intermediate state. */
	CVMX_MF_HSH_IV(outer[0], 0);
	CVMX_MF_HSH_IV(outer[1], 1);
	if (auth) {
		outer[2] = 0;
		CVMX_MF_HSH_IV(outer[2], 2);
	}
	return;
}
319
320 /****************************************************************************/
321 /* AES functions */
322
/*
 * AES-CBC encrypt a region of the iovec chain in place on the Octeon
 * crypto coprocessor.
 *
 * od        - session state: AES key in octo_enckey, key length (bytes)
 *             in octo_encklen (16/24/32)
 * iov/iovcnt/iovlen - scatter/gather data and its total byte length
 * crypt_off - byte offset of the region to encrypt; must be 8-byte
 *             aligned (checked below)
 * crypt_len - bytes to encrypt, consumed one 16-byte AES block (two
 *             64-bit iovec words) per loop pass
 * ivp       - 16-byte CBC IV
 * auth_off/auth_len/icv - unused by this cipher-only routine
 *
 * Returns 0 on success or -EINVAL on bad parameters.
 */
int
octo_aes_cbc_encrypt(
    struct octo_sess *od,
    struct iovec *iov, size_t iovcnt, size_t iovlen,
    int auth_off, int auth_len,
    int crypt_off, int crypt_len,
    uint8_t *icv, uint8_t *ivp)
{
	uint64_t *data, *pdata;
	int data_i, data_l;

	dprintf("%s()\n", __func__);

	if (__predict_false(od == NULL || iov==NULL || iovlen==0 || ivp==NULL ||
	    (crypt_off & 0x7) || (crypt_off + crypt_len > iovlen))) {
		dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
		    "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
		    "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
		    auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
		return -EINVAL;
	}

	IOV_INIT(iov, data, data_i, data_l);

	CVMX_PREFETCH0(ivp);
	CVMX_PREFETCH0(od->octo_enckey);

	/* load AES Key */
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);

	/* Unused high key words are zeroed for the shorter key sizes. */
	if (od->octo_encklen == 16) {
		CVMX_MT_AES_KEY(0x0, 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 24) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 32) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
	} else {
		dprintf("%s: Bad key length %d\n", __func__, od->octo_encklen);
		return -EINVAL;
	}
	CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);

	CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
	CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);

	/* Skip ahead to crypt_off, 8 bytes per consumed iovec word. */
	while (crypt_off > 0) {
		IOV_CONSUME(iov, data, data_i, data_l);
		crypt_off -= 8;
	}

	/*
	 * One AES block per pass: feed both 64-bit halves (which may
	 * straddle iovec segments), then store the result back in place.
	 */
	while (crypt_len > 0) {
		pdata = data;
		CVMX_MT_AES_ENC_CBC0(*data);
		IOV_CONSUME(iov, data, data_i, data_l);
		CVMX_MT_AES_ENC_CBC1(*data);
		CVMX_MF_AES_RESULT(*pdata, 0);
		CVMX_MF_AES_RESULT(*data, 1);
		IOV_CONSUME(iov, data, data_i, data_l);
		crypt_len -= 16;
	}

	return 0;
}
390
/*
 * AES-CBC decrypt a region of the iovec chain in place on the Octeon
 * crypto coprocessor.  Mirror image of octo_aes_cbc_encrypt(); see that
 * function for the parameter contract (same checks, same 8-byte
 * crypt_off alignment requirement, 16 bytes consumed per loop pass).
 *
 * Returns 0 on success or -EINVAL on bad parameters.
 */
int
octo_aes_cbc_decrypt(
    struct octo_sess *od,
    struct iovec *iov, size_t iovcnt, size_t iovlen,
    int auth_off, int auth_len,
    int crypt_off, int crypt_len,
    uint8_t *icv, uint8_t *ivp)
{
	uint64_t *data, *pdata;
	int data_i, data_l;

	dprintf("%s()\n", __func__);

	if (__predict_false(od == NULL || iov==NULL || iovlen==0 || ivp==NULL ||
	    (crypt_off & 0x7) || (crypt_off + crypt_len > iovlen))) {
		dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
		    "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
		    "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
		    auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
		return -EINVAL;
	}

	IOV_INIT(iov, data, data_i, data_l);

	CVMX_PREFETCH0(ivp);
	CVMX_PREFETCH0(od->octo_enckey);

	/* load AES Key */
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);

	/* Unused high key words are zeroed for the shorter key sizes. */
	if (od->octo_encklen == 16) {
		CVMX_MT_AES_KEY(0x0, 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 24) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 32) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
	} else {
		dprintf("%s: Bad key length %d\n", __func__, od->octo_encklen);
		return -EINVAL;
	}
	CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);

	CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
	CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);

	/* Skip ahead to crypt_off, 8 bytes per consumed iovec word. */
	while (crypt_off > 0) {
		IOV_CONSUME(iov, data, data_i, data_l);
		crypt_off -= 8;
	}

	/*
	 * One AES block per pass: feed both 64-bit halves of the
	 * ciphertext, then store the plaintext back in place.
	 */
	while (crypt_len > 0) {
		pdata = data;
		CVMX_MT_AES_DEC_CBC0(*data);
		IOV_CONSUME(iov, data, data_i, data_l);
		CVMX_MT_AES_DEC_CBC1(*data);
		CVMX_MF_AES_RESULT(*pdata, 0);
		CVMX_MF_AES_RESULT(*data, 1);
		IOV_CONSUME(iov, data, data_i, data_l);
		crypt_len -= 16;
	}

	return 0;
}
458
459 /****************************************************************************/
460 /* SHA1 */
461
/*
 * Compute an HMAC-SHA1 over a region of the iovec chain with no cipher
 * (NULL encryption), writing the truncated 96-bit MAC to `icv'.
 *
 * od       - session state with precomputed HMAC inner/outer hash
 *            states (octo_hminner/octo_hmouter, see octo_calc_hash())
 * iov/iovcnt/iovlen - scatter/gather data and total byte length
 * auth_off - byte offset of the authenticated region; must be 8-byte
 *            aligned (checked below)
 * auth_len - bytes to authenticate, consumed 8 bytes per pass
 * icv      - out: 12 bytes of HMAC-SHA1 (ESP truncation)
 * crypt_off/crypt_len/ivp - unused by this auth-only routine
 *
 * Returns 0 on success or -EINVAL on bad parameters.
 */
int
octo_null_sha1_encrypt(
    struct octo_sess *od,
    struct iovec *iov, size_t iovcnt, size_t iovlen,
    int auth_off, int auth_len,
    int crypt_off, int crypt_len,
    uint8_t *icv, uint8_t *ivp)
{
	int next = 0;
	uint64_t *data;
	uint64_t tmp1, tmp2, tmp3;
	int data_i, data_l, alen = auth_len;

	dprintf("%s()\n", __func__);

	if (__predict_false(od == NULL || iov==NULL || iovlen==0 ||
	    (auth_off & 0x7) || (auth_off + auth_len > iovlen))) {
		dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
		    "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
		    "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
		    auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
		return -EINVAL;
	}

	IOV_INIT(iov, data, data_i, data_l);

	/* Load SHA1 IV: resume from the precomputed HMAC inner state. */
	CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
	CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
	CVMX_MT_HSH_IV(od->octo_hminner[2], 2);

	/* Skip ahead to auth_off, 8 bytes per consumed iovec word. */
	while (auth_off > 0) {
		IOV_CONSUME(iov, data, data_i, data_l);
		auth_off -= 8;
	}

	/* Hash the payload one 64-bit word at a time. */
	while (auth_len > 0) {
		CVM_LOAD_SHA_UNIT(*data, next);
		auth_len -= 8;
		IOV_CONSUME(iov, data, data_i, data_l);
	}

	/* finish the hash: append the mandatory 0x80 padding byte */
	CVMX_PREFETCH0(od->octo_hmouter);
#if 0
	if (__predict_false(inplen)) {
		uint64_t tmp = 0;
		uint8_t *p = (uint8_t *) & tmp;
		p[inplen] = 0x80;
		do {
			inplen--;
			p[inplen] = ((uint8_t *) data)[inplen];
		} while (inplen);
		CVM_LOAD_MD5_UNIT(tmp, next);
	} else {
		CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
	}
#else
	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
#endif

	/* Finish Inner hash: zero-fill up to the final length word. */
	while (next != 7) {
		CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
	}
	/* Bit length of the hashed message, including the 64-byte ipad block. */
	CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);

	/* Get the inner hash of HMAC */
	CVMX_MF_HSH_IV(tmp1, 0);
	CVMX_MF_HSH_IV(tmp2, 1);
	tmp3 = 0;
	CVMX_MF_HSH_IV(tmp3, 2);

	/* Initialize hash unit with the precomputed HMAC outer state. */
	CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
	CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
	CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);

	/* Outer block: the 20-byte inner digest, 0x80 pad, then zeros. */
	CVMX_MT_HSH_DAT(tmp1, 0);
	CVMX_MT_HSH_DAT(tmp2, 1);
	tmp3 |= 0x0000000080000000;	/* 0x80 pad right after the digest */
	CVMX_MT_HSH_DAT(tmp3, 2);
	CVMX_MT_HSH_DATZ(3);
	CVMX_MT_HSH_DATZ(4);
	CVMX_MT_HSH_DATZ(5);
	CVMX_MT_HSH_DATZ(6);
	/* Bit length of the outer message: 64-byte opad block + 20-byte digest. */
	CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));

	/*
	 * save the HMAC: 12 bytes total (ESP truncation).
	 * NOTE(review): 64-bit store assumes icv is 8-byte aligned; confirm
	 * with callers.
	 */
	data = (uint64_t *)icv;
	CVMX_MF_HSH_IV(*data, 0);
	data++;
	CVMX_MF_HSH_IV(tmp1, 1);
	*(uint32_t *)data = (uint32_t) (tmp1 >> 32);

	return 0;
}
559
560 /****************************************************************************/
561 /* AES SHA1 */
562
/*
 * Combined AES-CBC encrypt + HMAC-SHA1 authenticate over (possibly
 * overlapping) regions of the iovec chain, in a single pass.  The
 * payload is gathered 16 bytes at a time at 32-bit granularity (so
 * crypt_off/auth_off only need 4-byte alignment), encrypted and/or
 * hashed, then scattered back in place.  The truncated 96-bit HMAC is
 * written to `icv'.
 *
 * od        - session: AES key/length plus precomputed HMAC states
 * crypt_off/crypt_len - cipher region; 4-byte aligned offset, length a
 *             multiple of 8 (checked below)
 * auth_off/auth_len   - auth region; same alignment rules
 * ivp       - 16-byte CBC IV
 * icv       - out: 12 bytes of HMAC-SHA1
 *
 * Returns 0 on success or -EINVAL on bad parameters.
 */
int
octo_aes_cbc_sha1_encrypt(
    struct octo_sess *od,
    struct iovec *iov, size_t iovcnt, size_t iovlen,
    int auth_off, int auth_len,
    int crypt_off, int crypt_len,
    uint8_t *icv, uint8_t *ivp)
{
	int next = 0;
	/* 16-byte staging buffer, addressable as 32- or 64-bit words. */
	union {
		uint32_t data32[2];
		uint64_t data64[1];
	} mydata[2];
	uint64_t *pdata = &mydata[0].data64[0];
	uint64_t *data = &mydata[1].data64[0];
	uint32_t *data32;
	uint64_t tmp1, tmp2, tmp3;
	int data_i, data_l, alen = auth_len;

	dprintf("%s()\n", __func__);

	if (__predict_false(od == NULL || iov==NULL || iovlen==0 || ivp==NULL ||
	    (crypt_off & 0x3) || (crypt_off + crypt_len > iovlen) ||
	    (crypt_len & 0x7) ||
	    (auth_len & 0x7) ||
	    (auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
		dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
		    "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
		    "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
		    auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
		return -EINVAL;
	}

	IOV_INIT(iov, data32, data_i, data_l);

	CVMX_PREFETCH0(ivp);
	CVMX_PREFETCH0(od->octo_enckey);

	/* load AES Key */
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);

	/* Unused high key words are zeroed for the shorter key sizes. */
	if (od->octo_encklen == 16) {
		CVMX_MT_AES_KEY(0x0, 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 24) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 32) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
	} else {
		dprintf("%s: Bad key length %d\n", __func__, od->octo_encklen);
		return -EINVAL;
	}
	CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);

	CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
	CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);

	/* Load SHA IV: resume from the precomputed HMAC inner state. */
	CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
	CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
	CVMX_MT_HSH_IV(od->octo_hminner[2], 2);

	/* Skip the prefix that precedes both regions, 4 bytes at a time. */
	while (crypt_off > 0 && auth_off > 0) {
		IOV_CONSUME(iov, data32, data_i, data_l);
		crypt_off -= 4;
		auth_off -= 4;
	}

	/*
	 * Main loop: gather 16 bytes (four 32-bit words, possibly spread
	 * over several segments) into mydata[], encrypt and/or hash them
	 * depending on the remaining offsets, then scatter back in place.
	 */
	while (crypt_len > 0 || auth_len > 0) {
		uint32_t *pdata32[3];	/* write-back pointers for words 0-2 */

		pdata32[0] = data32;
		mydata[0].data32[0] = *data32;
		IOV_CONSUME(iov, data32, data_i, data_l);
		pdata32[1] = data32;
		mydata[0].data32[1] = *data32;
		IOV_CONSUME(iov, data32, data_i, data_l);
		pdata32[2] = data32;
		mydata[1].data32[0] = *data32;
		IOV_CONSUME(iov, data32, data_i, data_l);
		mydata[1].data32[1] = *data32;

		/* Encrypt-then-hash: cipher first so the MAC covers ciphertext. */
		if (crypt_off <= 0) {
			if (crypt_len > 0) {
				CVMX_MT_AES_ENC_CBC0(*pdata);
				CVMX_MT_AES_ENC_CBC1(*data);
				CVMX_MF_AES_RESULT(*pdata, 0);
				CVMX_MF_AES_RESULT(*data, 1);
				crypt_len -= 16;
			}
		} else
			crypt_off -= 16;

		if (auth_off <= 0) {
			if (auth_len > 0) {
				CVM_LOAD_SHA_UNIT(*pdata, next);
				CVM_LOAD_SHA_UNIT(*data, next);
				auth_len -= 16;
			}
		} else
			auth_off -= 16;

		/* Scatter the (possibly transformed) 16 bytes back. */
		*pdata32[0] = mydata[0].data32[0];
		*pdata32[1] = mydata[0].data32[1];
		*pdata32[2] = mydata[1].data32[0];
		*data32 = mydata[1].data32[1];

		IOV_CONSUME(iov, data32, data_i, data_l);
	}

	/* finish the hash: append the mandatory 0x80 padding byte */
	CVMX_PREFETCH0(od->octo_hmouter);
#if 0
	if (__predict_false(inplen)) {
		uint64_t tmp = 0;
		uint8_t *p = (uint8_t *) & tmp;
		p[inplen] = 0x80;
		do {
			inplen--;
			p[inplen] = ((uint8_t *) data)[inplen];
		} while (inplen);
		CVM_LOAD_SHA_UNIT(tmp, next);
	} else {
		CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
	}
#else
	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
#endif

	/* Finish Inner hash: zero-fill up to the final length word. */
	while (next != 7) {
		CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
	}
	/* Bit length of the hashed message, including the 64-byte ipad block. */
	CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);

	/* Get the inner hash of HMAC */
	CVMX_MF_HSH_IV(tmp1, 0);
	CVMX_MF_HSH_IV(tmp2, 1);
	tmp3 = 0;
	CVMX_MF_HSH_IV(tmp3, 2);

	/* Initialize hash unit with the precomputed HMAC outer state. */
	CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
	CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
	CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);

	/* Outer block: the 20-byte inner digest, 0x80 pad, then zeros. */
	CVMX_MT_HSH_DAT(tmp1, 0);
	CVMX_MT_HSH_DAT(tmp2, 1);
	tmp3 |= 0x0000000080000000;	/* 0x80 pad right after the digest */
	CVMX_MT_HSH_DAT(tmp3, 2);
	CVMX_MT_HSH_DATZ(3);
	CVMX_MT_HSH_DATZ(4);
	CVMX_MT_HSH_DATZ(5);
	CVMX_MT_HSH_DATZ(6);
	/* Bit length of the outer message: 64-byte opad block + 20-byte digest. */
	CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));

	/*
	 * NOTE(review): this extra word load after the final STARTSHA looks
	 * like a leftover from the MD5 variant of this routine and appears
	 * to have no effect on the result read back below — confirm before
	 * removing.
	 */
	CVMX_PREFETCH0(od->octo_hmouter);
#if 0
	if (__predict_false(inplen)) {
		uint64_t tmp = 0;
		uint8_t *p = (uint8_t *) & tmp;
		p[inplen] = 0x80;
		do {
			inplen--;
			p[inplen] = ((uint8_t *) data)[inplen];
		} while (inplen);
		CVM_LOAD_MD5_UNIT(tmp, next);
	} else {
		CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
	}
#else
	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
#endif

	/* save the HMAC: 12 bytes total (ESP truncation), 4 bytes at a time */
	data32 = (uint32_t *)icv;
	CVMX_MF_HSH_IV(tmp1, 0);
	*data32 = (uint32_t) (tmp1 >> 32);
	data32++;
	*data32 = (uint32_t) tmp1;
	data32++;
	CVMX_MF_HSH_IV(tmp1, 1);
	*data32 = (uint32_t) (tmp1 >> 32);

	return 0;
}
753
/*
 * Combined HMAC-SHA1 authenticate + AES-CBC decrypt over (possibly
 * overlapping) regions of the iovec chain, in a single pass.  Mirror
 * image of octo_aes_cbc_sha1_encrypt(): here each 16-byte chunk is
 * hashed first (the MAC covers the ciphertext) and then decrypted.
 * The truncated 96-bit HMAC is written to `icv'; the caller compares
 * it against the received ICV.
 *
 * Parameter contract is identical to octo_aes_cbc_sha1_encrypt():
 * 4-byte aligned offsets, lengths a multiple of 8, 16-byte IV in ivp.
 *
 * Returns 0 on success or -EINVAL on bad parameters.
 */
int
octo_aes_cbc_sha1_decrypt(
    struct octo_sess *od,
    struct iovec *iov, size_t iovcnt, size_t iovlen,
    int auth_off, int auth_len,
    int crypt_off, int crypt_len,
    uint8_t *icv, uint8_t *ivp)
{
	int next = 0;
	/* 16-byte staging buffer, addressable as 32- or 64-bit words. */
	union {
		uint32_t data32[2];
		uint64_t data64[1];
	} mydata[2];
	uint64_t *pdata = &mydata[0].data64[0];
	uint64_t *data = &mydata[1].data64[0];
	uint32_t *data32;
	uint64_t tmp1, tmp2, tmp3;
	int data_i, data_l, alen = auth_len;

	dprintf("%s()\n", __func__);

	if (__predict_false(od == NULL || iov==NULL || iovlen==0 || ivp==NULL ||
	    (crypt_off & 0x3) || (crypt_off + crypt_len > iovlen) ||
	    (crypt_len & 0x7) ||
	    (auth_len & 0x7) ||
	    (auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
		dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
		    "auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
		    "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
		    auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
		return -EINVAL;
	}

	IOV_INIT(iov, data32, data_i, data_l);

	CVMX_PREFETCH0(ivp);
	CVMX_PREFETCH0(od->octo_enckey);

	/* load AES Key */
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[0], 0);
	CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[1], 1);

	/* Unused high key words are zeroed for the shorter key sizes. */
	if (od->octo_encklen == 16) {
		CVMX_MT_AES_KEY(0x0, 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 24) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(0x0, 3);
	} else if (od->octo_encklen == 32) {
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[2], 2);
		CVMX_MT_AES_KEY(((uint64_t *) od->octo_enckey)[3], 3);
	} else {
		dprintf("%s: Bad key length %d\n", __func__, od->octo_encklen);
		return -EINVAL;
	}
	CVMX_MT_AES_KEYLENGTH(od->octo_encklen / 8 - 1);

	CVMX_MT_AES_IV(((uint64_t *) ivp)[0], 0);
	CVMX_MT_AES_IV(((uint64_t *) ivp)[1], 1);

	/* Load SHA1 IV: resume from the precomputed HMAC inner state. */
	CVMX_MT_HSH_IV(od->octo_hminner[0], 0);
	CVMX_MT_HSH_IV(od->octo_hminner[1], 1);
	CVMX_MT_HSH_IV(od->octo_hminner[2], 2);

	/* Skip the prefix that precedes both regions, 4 bytes at a time. */
	while (crypt_off > 0 && auth_off > 0) {
		IOV_CONSUME(iov, data32, data_i, data_l);
		crypt_off -= 4;
		auth_off -= 4;
	}

	/*
	 * Main loop: gather 16 bytes (four 32-bit words, possibly spread
	 * over several segments) into mydata[], hash and/or decrypt them
	 * depending on the remaining offsets, then scatter back in place.
	 */
	while (crypt_len > 0 || auth_len > 0) {
		uint32_t *pdata32[3];	/* write-back pointers for words 0-2 */

		pdata32[0] = data32;
		mydata[0].data32[0] = *data32;
		IOV_CONSUME(iov, data32, data_i, data_l);
		pdata32[1] = data32;
		mydata[0].data32[1] = *data32;
		IOV_CONSUME(iov, data32, data_i, data_l);
		pdata32[2] = data32;
		mydata[1].data32[0] = *data32;
		IOV_CONSUME(iov, data32, data_i, data_l);
		mydata[1].data32[1] = *data32;

		/* Hash-then-decrypt: the MAC is computed over the ciphertext. */
		if (auth_off <= 0) {
			if (auth_len > 0) {
				CVM_LOAD_SHA_UNIT(*pdata, next);
				CVM_LOAD_SHA_UNIT(*data, next);
				auth_len -= 16;
			}
		} else
			auth_off -= 16;

		if (crypt_off <= 0) {
			if (crypt_len > 0) {
				CVMX_MT_AES_DEC_CBC0(*pdata);
				CVMX_MT_AES_DEC_CBC1(*data);
				CVMX_MF_AES_RESULT(*pdata, 0);
				CVMX_MF_AES_RESULT(*data, 1);
				crypt_len -= 16;
			}
		} else
			crypt_off -= 16;

		/* Scatter the (possibly transformed) 16 bytes back. */
		*pdata32[0] = mydata[0].data32[0];
		*pdata32[1] = mydata[0].data32[1];
		*pdata32[2] = mydata[1].data32[0];
		*data32 = mydata[1].data32[1];

		IOV_CONSUME(iov, data32, data_i, data_l);
	}

	/* finish the hash: append the mandatory 0x80 padding byte */
	CVMX_PREFETCH0(od->octo_hmouter);
#if 0
	if (__predict_false(inplen)) {
		uint64_t tmp = 0;
		uint8_t *p = (uint8_t *) & tmp;
		p[inplen] = 0x80;
		do {
			inplen--;
			p[inplen] = ((uint8_t *) data)[inplen];
		} while (inplen);
		CVM_LOAD_SHA_UNIT(tmp, next);
	} else {
		CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
	}
#else
	CVM_LOAD_SHA_UNIT(0x8000000000000000ULL, next);
#endif

	/* Finish Inner hash: zero-fill up to the final length word. */
	while (next != 7) {
		CVM_LOAD_SHA_UNIT(((uint64_t) 0x0ULL), next);
	}
	/* Bit length of the hashed message, including the 64-byte ipad block. */
	CVM_LOAD_SHA_UNIT((uint64_t) ((alen + 64) << 3), next);

	/* Get the inner hash of HMAC */
	CVMX_MF_HSH_IV(tmp1, 0);
	CVMX_MF_HSH_IV(tmp2, 1);
	tmp3 = 0;
	CVMX_MF_HSH_IV(tmp3, 2);

	/* Initialize hash unit with the precomputed HMAC outer state. */
	CVMX_MT_HSH_IV(od->octo_hmouter[0], 0);
	CVMX_MT_HSH_IV(od->octo_hmouter[1], 1);
	CVMX_MT_HSH_IV(od->octo_hmouter[2], 2);

	/* Outer block: the 20-byte inner digest, 0x80 pad, then zeros. */
	CVMX_MT_HSH_DAT(tmp1, 0);
	CVMX_MT_HSH_DAT(tmp2, 1);
	tmp3 |= 0x0000000080000000;	/* 0x80 pad right after the digest */
	CVMX_MT_HSH_DAT(tmp3, 2);
	CVMX_MT_HSH_DATZ(3);
	CVMX_MT_HSH_DATZ(4);
	CVMX_MT_HSH_DATZ(5);
	CVMX_MT_HSH_DATZ(6);
	/* Bit length of the outer message: 64-byte opad block + 20-byte digest. */
	CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));

	/*
	 * NOTE(review): this extra word load after the final STARTSHA looks
	 * like a leftover from the MD5 variant of this routine and appears
	 * to have no effect on the result read back below — confirm before
	 * removing.
	 */
	CVMX_PREFETCH0(od->octo_hmouter);
#if 0
	if (__predict_false(inplen)) {
		uint64_t tmp = 0;
		uint8_t *p = (uint8_t *) & tmp;
		p[inplen] = 0x80;
		do {
			inplen--;
			p[inplen] = ((uint8_t *) data)[inplen];
		} while (inplen);
		CVM_LOAD_MD5_UNIT(tmp, next);
	} else {
		CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
	}
#else
	CVM_LOAD_MD5_UNIT(0x8000000000000000ULL, next);
#endif

	/* save the HMAC: 12 bytes total (ESP truncation), 4 bytes at a time */
	data32 = (uint32_t *)icv;
	CVMX_MF_HSH_IV(tmp1, 0);
	*data32 = (uint32_t) (tmp1 >> 32);
	data32++;
	*data32 = (uint32_t) tmp1;
	data32++;
	CVMX_MF_HSH_IV(tmp1, 1);
	*data32 = (uint32_t) (tmp1 >> 32);

	return 0;
}
944
945 /****************************************************************************/