FreeBSD/Linux Kernel Cross Reference
sys/dev/sec/sec.c
1 /*-
2 * Copyright (C) 2008-2009 Semihalf, Piotr Ziecik
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
16 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
17 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
18 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
19 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
21 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
22 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
23 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 /*
27 * Freescale integrated Security Engine (SEC) driver. Currently SEC 2.0 and
28 * 3.0 are supported.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD: releng/9.1/sys/dev/sec/sec.c 229093 2011-12-31 14:12:12Z hselasky $");
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/bus.h>
37 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/mbuf.h>
42 #include <sys/module.h>
43 #include <sys/mutex.h>
44 #include <sys/random.h>
45 #include <sys/rman.h>
46
47 #include <machine/bus.h>
48 #include <machine/resource.h>
49
50 #include <opencrypto/cryptodev.h>
51 #include "cryptodev_if.h"
52
53 #include <dev/ofw/ofw_bus_subr.h>
54 #include <dev/sec/sec.h>
55
56 static int sec_probe(device_t dev);
57 static int sec_attach(device_t dev);
58 static int sec_detach(device_t dev);
59 static int sec_suspend(device_t dev);
60 static int sec_resume(device_t dev);
61 static int sec_shutdown(device_t dev);
62 static void sec_primary_intr(void *arg);
63 static void sec_secondary_intr(void *arg);
64 static int sec_setup_intr(struct sec_softc *sc, struct resource **ires,
65 void **ihand, int *irid, driver_intr_t handler, const char *iname);
66 static void sec_release_intr(struct sec_softc *sc, struct resource *ires,
67 void *ihand, int irid, const char *iname);
68 static int sec_controller_reset(struct sec_softc *sc);
69 static int sec_channel_reset(struct sec_softc *sc, int channel, int full);
70 static int sec_init(struct sec_softc *sc);
71 static int sec_alloc_dma_mem(struct sec_softc *sc,
72 struct sec_dma_mem *dma_mem, bus_size_t size);
73 static int sec_desc_map_dma(struct sec_softc *sc,
74 struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
75 struct sec_desc_map_info *sdmi);
76 static void sec_free_dma_mem(struct sec_dma_mem *dma_mem);
77 static void sec_enqueue(struct sec_softc *sc);
78 static int sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc,
79 int channel);
80 static int sec_eu_channel(struct sec_softc *sc, int eu);
81 static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
82 u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
83 static int sec_make_pointer_direct(struct sec_softc *sc,
84 struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
85 static int sec_alloc_session(struct sec_softc *sc);
86 static int sec_newsession(device_t dev, u_int32_t *sidp,
87 struct cryptoini *cri);
88 static int sec_freesession(device_t dev, uint64_t tid);
89 static int sec_process(device_t dev, struct cryptop *crp, int hint);
90 static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
91 struct cryptoini **mac);
92 static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
93 struct cryptodesc **mac);
94 static int sec_build_common_ns_desc(struct sec_softc *sc,
95 struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
96 struct cryptodesc *enc, int buftype);
97 static int sec_build_common_s_desc(struct sec_softc *sc,
98 struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
99 struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
100
101 static struct sec_session *sec_get_session(struct sec_softc *sc, u_int sid);
102 static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
103
104 /* AESU */
105 static int sec_aesu_newsession(struct sec_softc *sc,
106 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
107 static int sec_aesu_make_desc(struct sec_softc *sc,
108 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
109 int buftype);
110
111 /* DEU */
112 static int sec_deu_newsession(struct sec_softc *sc,
113 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
114 static int sec_deu_make_desc(struct sec_softc *sc,
115 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
116 int buftype);
117
118 /* MDEU */
119 static int sec_mdeu_can_handle(u_int alg);
120 static int sec_mdeu_config(struct cryptodesc *crd,
121 u_int *eu, u_int *mode, u_int *hashlen);
122 static int sec_mdeu_newsession(struct sec_softc *sc,
123 struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
124 static int sec_mdeu_make_desc(struct sec_softc *sc,
125 struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
126 int buftype);
127
/*
 * Newbus device interface and opencrypto driver entry points.  The SEC
 * engine attaches below simplebus (device tree enumerated) and depends
 * on the crypto framework module.
 */
static device_method_t sec_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, sec_probe),
	DEVMETHOD(device_attach, sec_attach),
	DEVMETHOD(device_detach, sec_detach),

	DEVMETHOD(device_suspend, sec_suspend),
	DEVMETHOD(device_resume, sec_resume),
	DEVMETHOD(device_shutdown, sec_shutdown),

	/* Crypto methods */
	DEVMETHOD(cryptodev_newsession, sec_newsession),
	DEVMETHOD(cryptodev_freesession,sec_freesession),
	DEVMETHOD(cryptodev_process, sec_process),

	DEVMETHOD_END
};
static driver_t sec_driver = {
	"sec",
	sec_methods,
	sizeof(struct sec_softc),
};

static devclass_t sec_devclass;
DRIVER_MODULE(sec, simplebus, sec_driver, sec_devclass, 0, 0);
MODULE_DEPEND(sec, crypto, 1, 1, 1);
154
/*
 * Table of per-execution-unit methods (session setup + descriptor
 * build), terminated by a NULL entry.  Order: AESU, DEU, MDEU.
 */
static struct sec_eu_methods sec_eus[] = {
	{
		sec_aesu_newsession,
		sec_aesu_make_desc,
	},
	{
		sec_deu_newsession,
		sec_deu_make_desc,
	},
	{
		sec_mdeu_newsession,
		sec_mdeu_make_desc,
	},
	{ NULL, NULL }
};
170
171 static inline void
172 sec_sync_dma_mem(struct sec_dma_mem *dma_mem, bus_dmasync_op_t op)
173 {
174
175 /* Sync only if dma memory is valid */
176 if (dma_mem->dma_vaddr != NULL)
177 bus_dmamap_sync(dma_mem->dma_tag, dma_mem->dma_map, op);
178 }
179
/*
 * Return a session slot to the free pool.  Only the ss_used flag is
 * cleared (under the sessions lock); keys remain in the slot until it
 * is reused by sec_alloc_session().
 */
static inline void
sec_free_session(struct sec_softc *sc, struct sec_session *ses)
{

	SEC_LOCK(sc, sessions);
	ses->ss_used = 0;
	SEC_UNLOCK(sc, sessions);
}
188
189 static inline void *
190 sec_get_pointer_data(struct sec_desc *desc, u_int n)
191 {
192
193 return (desc->sd_ptr_dmem[n].dma_vaddr);
194 }
195
/*
 * Probe for a supported SEC device.
 *
 * Temporarily maps the register window, reads the SEC_ID register and
 * accepts only the known SEC 2.0 / 3.0 IDs.  sc_version is recorded in
 * the softc here and later consulted by attach.
 */
static int
sec_probe(device_t dev)
{
	struct sec_softc *sc;
	uint64_t id;

	if (!ofw_bus_is_compatible(dev, "fsl,sec2.0"))
		return (ENXIO);

	sc = device_get_softc(dev);

	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL)
		return (ENXIO);

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	id = SEC_READ(sc, SEC_ID);

	/* Registers are mapped again in attach; release them for now. */
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);

	switch (id) {
	case SEC_20_ID:
		device_set_desc(dev, "Freescale Security Engine 2.0");
		sc->sc_version = 2;
		break;
	case SEC_30_ID:
		device_set_desc(dev, "Freescale Security Engine 3.0");
		sc->sc_version = 3;
		break;
	default:
		device_printf(dev, "unknown SEC ID 0x%016llx!\n", id);
		return (ENXIO);
	}

	return (0);
}
237
/*
 * Attach the SEC device: set up locks, register window, interrupts and
 * the DMA-shared descriptor/link-table rings, reset the hardware, then
 * register the supported algorithms with opencrypto.
 *
 * On failure the failN labels unwind in reverse order of acquisition.
 */
static int
sec_attach(device_t dev)
{
	struct sec_softc *sc;
	struct sec_hw_lt *lt;
	int error = 0;
	int i;

	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_blocked = 0;
	sc->sc_shutdown = 0;

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver ID!\n");
		return (ENXIO);
	}

	/* Init locks */
	mtx_init(&sc->sc_controller_lock, device_get_nameunit(dev),
	    "SEC Controller lock", MTX_DEF);
	mtx_init(&sc->sc_descriptors_lock, device_get_nameunit(dev),
	    "SEC Descriptors lock", MTX_DEF);
	mtx_init(&sc->sc_sessions_lock, device_get_nameunit(dev),
	    "SEC Sessions lock", MTX_DEF);

	/* Allocate I/O memory for SEC registers */
	sc->sc_rrid = 0;
	sc->sc_rres = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->sc_rrid,
	    RF_ACTIVE);

	if (sc->sc_rres == NULL) {
		device_printf(dev, "could not allocate I/O memory!\n");
		goto fail1;
	}

	sc->sc_bas.bsh = rman_get_bushandle(sc->sc_rres);
	sc->sc_bas.bst = rman_get_bustag(sc->sc_rres);

	/* Setup interrupts */
	sc->sc_pri_irid = 0;
	error = sec_setup_intr(sc, &sc->sc_pri_ires, &sc->sc_pri_ihand,
	    &sc->sc_pri_irid, sec_primary_intr, "primary");

	if (error)
		goto fail2;

	/* SEC 3.0 has an additional (secondary) interrupt line */
	if (sc->sc_version == 3) {
		sc->sc_sec_irid = 1;
		error = sec_setup_intr(sc, &sc->sc_sec_ires, &sc->sc_sec_ihand,
		    &sc->sc_sec_irid, sec_secondary_intr, "secondary");

		if (error)
			goto fail3;
	}

	/* Alloc DMA memory for descriptors and link tables */
	error = sec_alloc_dma_mem(sc, &(sc->sc_desc_dmem),
	    SEC_DESCRIPTORS * sizeof(struct sec_hw_desc));

	if (error)
		goto fail4;

	/* One extra link table entry is reserved to close the ring below */
	error = sec_alloc_dma_mem(sc, &(sc->sc_lt_dmem),
	    (SEC_LT_ENTRIES + 1) * sizeof(struct sec_hw_lt));

	if (error)
		goto fail5;

	/*
	 * Fill in descriptors and link tables: record for each software
	 * descriptor both the kernel virtual and the bus (physical)
	 * address of its hardware counterpart in the shared DMA region.
	 */
	for (i = 0; i < SEC_DESCRIPTORS; i++) {
		sc->sc_desc[i].sd_desc =
		    (struct sec_hw_desc*)(sc->sc_desc_dmem.dma_vaddr) + i;
		sc->sc_desc[i].sd_desc_paddr = sc->sc_desc_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_desc));
	}

	for (i = 0; i < SEC_LT_ENTRIES + 1; i++) {
		sc->sc_lt[i].sl_lt =
		    (struct sec_hw_lt*)(sc->sc_lt_dmem.dma_vaddr) + i;
		sc->sc_lt[i].sl_lt_paddr = sc->sc_lt_dmem.dma_paddr +
		    (i * sizeof(struct sec_hw_lt));
	}

	/* Last entry in link table is used to create a circle */
	lt = sc->sc_lt[SEC_LT_ENTRIES].sl_lt;
	lt->shl_length = 0;
	lt->shl_r = 0;
	lt->shl_n = 1;
	lt->shl_ptr = sc->sc_lt[0].sl_lt_paddr;

	/* Init descriptor and link table queues pointers */
	SEC_CNT_INIT(sc, sc_free_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_free_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_ready_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_get_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_queued_desc_put_cnt, SEC_DESCRIPTORS);
	SEC_CNT_INIT(sc, sc_lt_alloc_cnt, SEC_LT_ENTRIES);
	SEC_CNT_INIT(sc, sc_lt_free_cnt, SEC_LT_ENTRIES);

	/* Create masks for fast checks */
	sc->sc_int_error_mask = 0;
	for (i = 0; i < SEC_CHANNELS; i++)
		sc->sc_int_error_mask |= (~0ULL & SEC_INT_CH_ERR(i));

	/* Channel status register layout differs between SEC 2.x and 3.x */
	switch (sc->sc_version) {
	case 2:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR2_FFLVL_M << SEC_CHAN_CSR2_FFLVL_S) |
		    (SEC_CHAN_CSR2_MSTATE_M << SEC_CHAN_CSR2_MSTATE_S) |
		    (SEC_CHAN_CSR2_PSTATE_M << SEC_CHAN_CSR2_PSTATE_S) |
		    (SEC_CHAN_CSR2_GSTATE_M << SEC_CHAN_CSR2_GSTATE_S);
		break;
	case 3:
		sc->sc_channel_idle_mask =
		    (SEC_CHAN_CSR3_FFLVL_M << SEC_CHAN_CSR3_FFLVL_S) |
		    (SEC_CHAN_CSR3_MSTATE_M << SEC_CHAN_CSR3_MSTATE_S) |
		    (SEC_CHAN_CSR3_PSTATE_M << SEC_CHAN_CSR3_PSTATE_S) |
		    (SEC_CHAN_CSR3_GSTATE_M << SEC_CHAN_CSR3_GSTATE_S);
		break;
	}

	/* Init hardware */
	error = sec_init(sc);

	if (error)
		goto fail6;

	/* Register in OCF (AESU) */
	crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);

	/* Register in OCF (DEU) */
	crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);

	/* Register in OCF (MDEU) */
	crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
	if (sc->sc_version >= 3) {
		crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
	}

	return (0);

	/*
	 * Unwind in reverse order.  For SEC 2.0 sc_sec_ires was never set
	 * up; sec_release_intr() is a no-op when its ires is NULL, so the
	 * fall-through at fail4 is safe (softc starts zeroed).
	 *
	 * NOTE(review): the crypto driver ID obtained above is not
	 * released on these failure paths — confirm whether
	 * crypto_unregister_all(sc->sc_cid) should be called here.
	 */
fail6:
	sec_free_dma_mem(&(sc->sc_lt_dmem));
fail5:
	sec_free_dma_mem(&(sc->sc_desc_dmem));
fail4:
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");
fail3:
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
fail2:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid, sc->sc_rres);
fail1:
	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);
	mtx_destroy(&sc->sc_sessions_lock);

	return (ENXIO);
}
408
/*
 * Detach the driver: drain outstanding work (bounded by SEC_TIMEOUT),
 * mask interrupts, unregister from opencrypto and release all DMA,
 * interrupt and register resources acquired in attach.
 */
static int
sec_detach(device_t dev)
{
	struct sec_softc *sc = device_get_softc(dev);
	int i, error, timeout = SEC_TIMEOUT;

	/* Prepare driver to shutdown (stops new enqueues/wakeups) */
	SEC_LOCK(sc, descriptors);
	sc->sc_shutdown = 1;
	SEC_UNLOCK(sc, descriptors);

	/* Wait until all queued processing finishes */
	while (1) {
		SEC_LOCK(sc, descriptors);
		i = SEC_READY_DESC_CNT(sc) + SEC_QUEUED_DESC_CNT(sc);
		SEC_UNLOCK(sc, descriptors);

		if (i == 0)
			break;

		if (timeout < 0) {
			device_printf(dev, "queue flush timeout!\n");

			/* DMA can be still active - stop it */
			for (i = 0; i < SEC_CHANNELS; i++)
				sec_channel_reset(sc, i, 1);

			break;
		}

		timeout -= 1000;
		DELAY(1000);
	}

	/* Disable interrupts */
	SEC_WRITE(sc, SEC_IER, 0);

	/* Unregister from OCF */
	crypto_unregister_all(sc->sc_cid);

	/* Free DMA memory still attached to descriptor pointer slots */
	for (i = 0; i < SEC_DESCRIPTORS; i++)
		SEC_DESC_FREE_POINTERS(&(sc->sc_desc[i]));

	sec_free_dma_mem(&(sc->sc_lt_dmem));
	sec_free_dma_mem(&(sc->sc_desc_dmem));

	/* Release interrupts (no-op for a NULL ires, e.g. SEC 2.0 secondary) */
	sec_release_intr(sc, sc->sc_pri_ires, sc->sc_pri_ihand,
	    sc->sc_pri_irid, "primary");
	sec_release_intr(sc, sc->sc_sec_ires, sc->sc_sec_ihand,
	    sc->sc_sec_irid, "secondary");

	/* Release memory */
	if (sc->sc_rres) {
		error = bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_rrid,
		    sc->sc_rres);
		if (error)
			device_printf(dev, "bus_release_resource() failed for"
			    " I/O memory, error %d\n", error);

		sc->sc_rres = NULL;
	}

	mtx_destroy(&sc->sc_controller_lock);
	mtx_destroy(&sc->sc_descriptors_lock);
	mtx_destroy(&sc->sc_sessions_lock);

	return (0);
}
479
/* Suspend: nothing to save; always succeeds. */
static int
sec_suspend(device_t dev)
{

	return (0);
}
486
/* Resume: nothing to restore; always succeeds. */
static int
sec_resume(device_t dev)
{

	return (0);
}
493
/* Shutdown: no quiescing performed; always succeeds. */
static int
sec_shutdown(device_t dev)
{

	return (0);
}
500
/*
 * Allocate an IRQ resource and install 'handler' as an MPSAFE ithread
 * handler on it.  On success *ires/*ihand are filled in; on failure the
 * IRQ resource is released and *ires is reset to NULL so the caller's
 * later sec_release_intr() becomes a no-op.
 */
static int
sec_setup_intr(struct sec_softc *sc, struct resource **ires, void **ihand,
    int *irid, driver_intr_t handler, const char *iname)
{
	int error;

	(*ires) = bus_alloc_resource_any(sc->sc_dev, SYS_RES_IRQ, irid,
	    RF_ACTIVE);

	if ((*ires) == NULL) {
		device_printf(sc->sc_dev, "could not allocate %s IRQ\n", iname);
		return (ENXIO);
	}

	error = bus_setup_intr(sc->sc_dev, *ires, INTR_MPSAFE | INTR_TYPE_NET,
	    NULL, handler, sc, ihand);

	if (error) {
		device_printf(sc->sc_dev, "failed to set up %s IRQ\n", iname);
		if (bus_release_resource(sc->sc_dev, SYS_RES_IRQ, *irid, *ires))
			device_printf(sc->sc_dev, "could not release %s IRQ\n",
			    iname);

		(*ires) = NULL;
		return (error);
	}

	return (0);
}
530
/*
 * Tear down an interrupt handler and release its IRQ resource.  Safe to
 * call with ires == NULL (interrupt was never set up); errors are only
 * logged since there is nothing more to do during teardown.
 */
static void
sec_release_intr(struct sec_softc *sc, struct resource *ires, void *ihand,
    int irid, const char *iname)
{
	int error;

	if (ires == NULL)
		return;

	error = bus_teardown_intr(sc->sc_dev, ires, ihand);
	if (error)
		device_printf(sc->sc_dev, "bus_teardown_intr() failed for %s"
		    " IRQ, error %d\n", iname, error);

	error = bus_release_resource(sc->sc_dev, SYS_RES_IRQ, irid, ires);
	if (error)
		device_printf(sc->sc_dev, "bus_release_resource() failed for %s"
		    " IRQ, error %d\n", iname, error);
}
550
/*
 * Primary interrupt handler.
 *
 * 1. Under the controller lock: check ISR for per-channel errors, mark
 *    the offending descriptor (found via the channel's current
 *    descriptor pointer register) with EIO and partially reset that
 *    channel; then ACK all interrupt causes.
 * 2. Under the descriptors lock: complete finished descriptors in queue
 *    order, stopping at the first one the hardware has not marked done
 *    (shd_done != 0xFF) unless it already carries an error.
 * 3. Re-enqueue ready work and unblock opencrypto if we had previously
 *    returned ERESTART.
 */
static void
sec_primary_intr(void *arg)
{
	struct sec_softc *sc = arg;
	struct sec_desc *desc;
	uint64_t isr;
	int i, wakeup = 0;

	SEC_LOCK(sc, controller);

	/* Check for errors */
	isr = SEC_READ(sc, SEC_ISR);
	if (isr & sc->sc_int_error_mask) {
		/* Check each channel for error */
		for (i = 0; i < SEC_CHANNELS; i++) {
			if ((isr & SEC_INT_CH_ERR(i)) == 0)
				continue;

			device_printf(sc->sc_dev,
			    "I/O error on channel %i!\n", i);

			/* Find and mark problematic descriptor */
			desc = sec_find_desc(sc, SEC_READ(sc,
			    SEC_CHAN_CDPR(i)));

			if (desc != NULL)
				desc->sd_error = EIO;

			/* Do partial channel reset */
			sec_channel_reset(sc, i, 0);
		}
	}

	/* ACK interrupt */
	SEC_WRITE(sc, SEC_ICR, 0xFFFFFFFFFFFFFFFFULL);

	SEC_UNLOCK(sc, controller);
	SEC_LOCK(sc, descriptors);

	/* Handle processed descriptors */
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	while (SEC_QUEUED_DESC_CNT(sc) > 0) {
		desc = SEC_GET_QUEUED_DESC(sc);

		/* Stop at the first descriptor still being processed */
		if (desc->sd_desc->shd_done != 0xFF && desc->sd_error == 0) {
			SEC_PUT_BACK_QUEUED_DESC(sc);
			break;
		}

		SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		/* Report completion (or EIO from above) to opencrypto */
		desc->sd_crp->crp_etype = desc->sd_error;
		crypto_done(desc->sd_crp);

		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_FREE_LT(sc, desc);
		SEC_DESC_QUEUED2FREE(sc);
	}

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Do not wake the framework up while detaching */
	if (!sc->sc_shutdown) {
		wakeup = sc->sc_blocked;
		sc->sc_blocked = 0;
	}

	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	if (wakeup)
		crypto_unblock(sc->sc_cid, wakeup);
}
627
/*
 * Secondary interrupt handler (SEC 3.0 only).  Not expected to fire;
 * logs the event and falls through to the primary handler so any real
 * work or error state is still processed.
 */
static void
sec_secondary_intr(void *arg)
{
	struct sec_softc *sc = arg;

	device_printf(sc->sc_dev, "spurious secondary interrupt!\n");
	sec_primary_intr(arg);
}
636
/*
 * Software-reset the whole controller by setting MCR[SWR] and polling
 * (1 ms steps, up to SEC_TIMEOUT) until the hardware clears the bit.
 * Returns 0 on success or ETIMEDOUT.
 */
static int
sec_controller_reset(struct sec_softc *sc)
{
	int timeout = SEC_TIMEOUT;

	/* Reset Controller */
	SEC_WRITE(sc, SEC_MCR, SEC_MCR_SWR);

	while (SEC_READ(sc, SEC_MCR) & SEC_MCR_SWR) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "device reset!\n");
			return (ETIMEDOUT);
		}
	}

	return (0);
}
658
/*
 * Reset a single channel.  'full' selects a full reset (CCR[R]) versus
 * a partial "continue" reset (CCR[CON]); the self-clearing bit is
 * polled with the same 1 ms / SEC_TIMEOUT scheme as the controller
 * reset.  After a full reset the channel configuration (done-interrupt
 * enable, notification type, burst size, plus version-specific bits)
 * is reprogrammed.  Returns 0 or ETIMEDOUT.
 */
static int
sec_channel_reset(struct sec_softc *sc, int channel, int full)
{
	int timeout = SEC_TIMEOUT;
	uint64_t bit = (full) ? SEC_CHAN_CCR_R : SEC_CHAN_CCR_CON;
	uint64_t reg;

	/* Reset Channel */
	reg = SEC_READ(sc, SEC_CHAN_CCR(channel));
	SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg | bit);

	while (SEC_READ(sc, SEC_CHAN_CCR(channel)) & bit) {
		DELAY(1000);
		timeout -= 1000;

		if (timeout < 0) {
			device_printf(sc->sc_dev, "timeout while waiting for "
			    "channel reset!\n");
			return (ETIMEDOUT);
		}
	}

	if (full) {
		reg = SEC_CHAN_CCR_CDIE | SEC_CHAN_CCR_NT | SEC_CHAN_CCR_BS;

		switch(sc->sc_version) {
		case 2:
			reg |= SEC_CHAN_CCR_CDWE;
			break;
		case 3:
			reg |= SEC_CHAN_CCR_AWSE | SEC_CHAN_CCR_WGN;
			break;
		}

		SEC_WRITE(sc, SEC_CHAN_CCR(channel), reg);
	}

	return (0);
}
698
/*
 * Bring the engine to a known state: reset the controller twice (to
 * clear all pending interrupts), fully reset every channel, then enable
 * the timeout interrupt plus per-channel done/error interrupts.
 * Returns 0 on success or the first reset error.
 */
static int
sec_init(struct sec_softc *sc)
{
	uint64_t reg;
	int error, i;

	/* Reset controller twice to clear all pending interrupts */
	error = sec_controller_reset(sc);
	if (error)
		return (error);

	error = sec_controller_reset(sc);
	if (error)
		return (error);

	/* Reset channels */
	for (i = 0; i < SEC_CHANNELS; i++) {
		error = sec_channel_reset(sc, i, 1);
		if (error)
			return (error);
	}

	/* Enable Interrupts */
	reg = SEC_INT_ITO;
	for (i = 0; i < SEC_CHANNELS; i++)
		reg |= SEC_INT_CH_DN(i) | SEC_INT_CH_ERR(i);

	SEC_WRITE(sc, SEC_IER, reg);

	/* error is 0 here — all resets above succeeded */
	return (error);
}
730
731 static void
732 sec_alloc_dma_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
733 {
734 struct sec_dma_mem *dma_mem = arg;
735
736 if (error)
737 return;
738
739 KASSERT(nseg == 1, ("Wrong number of segments, should be 1"));
740 dma_mem->dma_paddr = segs->ds_addr;
741 }
742
/*
 * bus_dmamap_load*() callback that converts DMA segments into a chain
 * of hardware link table entries.
 *
 * Skips sdmi_offset bytes from the start of the mapping, then links at
 * most sdmi_size bytes.  Records the first and last entries used in
 * sdmi so the caller can set the return bit and patch the descriptor
 * pointer.  Must run with the descriptors lock held (link table entries
 * are allocated from the shared ring).
 */
static void
sec_dma_map_desc_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	struct sec_desc_map_info *sdmi = arg;
	struct sec_softc *sc = sdmi->sdmi_sc;
	struct sec_lt *lt = NULL;
	bus_addr_t addr;
	bus_size_t size;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	if (error)
		return;

	for (i = 0; i < nseg; i++) {
		addr = segs[i].ds_addr;
		size = segs[i].ds_len;

		/* Skip requested offset */
		if (sdmi->sdmi_offset >= size) {
			sdmi->sdmi_offset -= size;
			continue;
		}

		addr += sdmi->sdmi_offset;
		size -= sdmi->sdmi_offset;
		sdmi->sdmi_offset = 0;

		/* Do not link more than requested */
		if (sdmi->sdmi_size < size)
			size = sdmi->sdmi_size;

		lt = SEC_ALLOC_LT_ENTRY(sc);
		lt->sl_lt->shl_length = size;
		lt->sl_lt->shl_r = 0;	/* return bit set later by caller */
		lt->sl_lt->shl_n = 0;
		lt->sl_lt->shl_ptr = addr;

		if (sdmi->sdmi_lt_first == NULL)
			sdmi->sdmi_lt_first = lt;

		sdmi->sdmi_lt_used += 1;

		/* Stop once the requested region is fully linked */
		if ((sdmi->sdmi_size -= size) == 0)
			break;
	}

	sdmi->sdmi_lt_last = lt;
}
794
/*
 * Variant of the callback for bus_dmamap_load_mbuf()/_uio(), which pass
 * an extra total-size argument that is not needed here.
 */
static void
sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
    bus_size_t size, int error)
{

	sec_dma_map_desc_cb(arg, segs, nseg, error);
}
802
/*
 * Allocate a contiguous, zeroed, 32-bit addressable DMA region of
 * 'size' bytes: create a single-segment tag, allocate the memory and
 * load it to obtain dma_paddr (via sec_alloc_dma_mem_cb).  dma_vaddr
 * doubles as the "in use" marker: EBUSY if already set, reset to NULL
 * on any failure.  dma_is_map = 0 distinguishes this from regions
 * mapped by sec_desc_map_dma() for the benefit of sec_free_dma_mem().
 */
static int
sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
    bus_size_t size)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	error = bus_dma_tag_create(NULL,	/* parent */
	    SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size, 1,				/* maxsize, nsegments */
	    size, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		goto err1;
	}

	error = bus_dmamem_alloc(dma_mem->dma_tag, &(dma_mem->dma_vaddr),
	    BUS_DMA_NOWAIT | BUS_DMA_ZERO, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate DMA safe"
		    " memory, error %i!\n", error);
		goto err2;
	}

	error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
	    dma_mem->dma_vaddr, size, sec_alloc_dma_mem_cb, dma_mem,
	    BUS_DMA_NOWAIT);

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i\n", error);
		goto err3;
	}

	dma_mem->dma_is_map = 0;
	return (0);

err3:
	bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr, dma_mem->dma_map);
err2:
	bus_dma_tag_destroy(dma_mem->dma_tag);
err1:
	dma_mem->dma_vaddr = NULL;
	return(error);
}
858
/*
 * Map an existing buffer (flat memory, uio or mbuf chain, per 'type')
 * for device access and build link table entries for it via the
 * sec_dma_map_desc_cb callbacks.
 *
 * For uio/mbuf the tag size is derived from the data rather than the
 * caller's 'size'; nsegments is bounded by the free link table entries
 * so the callback can always allocate one entry per segment.  On
 * success dma_vaddr is set to 'mem' (the caller's buffer — this region
 * is a mapping, dma_is_map = 1, not an allocation).
 */
static int
sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
    bus_size_t size, int type, struct sec_desc_map_info *sdmi)
{
	int error;

	if (dma_mem->dma_vaddr != NULL)
		return (EBUSY);

	switch (type) {
	case SEC_MEMORY:
		break;
	case SEC_UIO:
		size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
		break;
	case SEC_MBUF:
		size = m_length((struct mbuf*)mem, NULL);
		break;
	default:
		return (EINVAL);
	}

	error = bus_dma_tag_create(NULL,	/* parent */
	    SEC_DMA_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    size,				/* maxsize */
	    SEC_FREE_LT_CNT(sc),		/* nsegments */
	    SEC_MAX_DMA_BLOCK_SIZE, 0,		/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &(dma_mem->dma_tag));		/* dmat */

	if (error) {
		device_printf(sc->sc_dev, "failed to allocate busdma tag, error"
		    " %i!\n", error);
		dma_mem->dma_vaddr = NULL;
		return (error);
	}

	error = bus_dmamap_create(dma_mem->dma_tag, 0, &(dma_mem->dma_map));

	if (error) {
		device_printf(sc->sc_dev, "failed to create DMA map, error %i!"
		    "\n", error);
		bus_dma_tag_destroy(dma_mem->dma_tag);
		return (error);
	}

	/* Load the buffer; link tables are built inside the callback */
	switch (type) {
	case SEC_MEMORY:
		error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
		    mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
		break;
	case SEC_UIO:
		error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
		break;
	case SEC_MBUF:
		error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
		    mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
		break;
	}

	if (error) {
		device_printf(sc->sc_dev, "cannot get address of the DMA"
		    " memory, error %i!\n", error);
		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
		bus_dma_tag_destroy(dma_mem->dma_tag);
		return (error);
	}

	dma_mem->dma_is_map = 1;
	dma_mem->dma_vaddr = mem;

	return (0);
}
936
/*
 * Release a DMA region created by sec_alloc_dma_mem() or mapped by
 * sec_desc_map_dma().  dma_is_map selects the matching teardown path:
 * mappings only destroy the map, allocations free the memory.  Clearing
 * dma_vaddr marks the region free and makes repeated calls harmless.
 */
static void
sec_free_dma_mem(struct sec_dma_mem *dma_mem)
{

	/* Check for double free */
	if (dma_mem->dma_vaddr == NULL)
		return;

	bus_dmamap_unload(dma_mem->dma_tag, dma_mem->dma_map);

	if (dma_mem->dma_is_map)
		bus_dmamap_destroy(dma_mem->dma_tag, dma_mem->dma_map);
	else
		bus_dmamem_free(dma_mem->dma_tag, dma_mem->dma_vaddr,
		    dma_mem->dma_map);

	bus_dma_tag_destroy(dma_mem->dma_tag);
	dma_mem->dma_vaddr = NULL;
}
956
/*
 * Return the channel currently assigned to execution unit 'eu'
 * according to the EU assignment status register, or -1 if the unit is
 * idle/unknown (the EUASR fields are 1-based, 0 meaning unassigned).
 * Controller lock must be held so the read is consistent with a
 * subsequent enqueue decision.
 */
static int
sec_eu_channel(struct sec_softc *sc, int eu)
{
	uint64_t reg;
	int channel = 0;

	SEC_LOCK_ASSERT(sc, controller);

	reg = SEC_READ(sc, SEC_EUASR);

	switch (eu) {
	case SEC_EU_AFEU:
		channel = SEC_EUASR_AFEU(reg);
		break;
	case SEC_EU_DEU:
		channel = SEC_EUASR_DEU(reg);
		break;
	case SEC_EU_MDEU_A:
	case SEC_EU_MDEU_B:
		channel = SEC_EUASR_MDEU(reg);
		break;
	case SEC_EU_RNGU:
		channel = SEC_EUASR_RNGU(reg);
		break;
	case SEC_EU_PKEU:
		channel = SEC_EUASR_PKEU(reg);
		break;
	case SEC_EU_AESU:
		channel = SEC_EUASR_AESU(reg);
		break;
	case SEC_EU_KEU:
		channel = SEC_EUASR_KEU(reg);
		break;
	case SEC_EU_CRCU:
		channel = SEC_EUASR_CRCU(reg);
		break;
	}

	return (channel - 1);
}
997
998 static int
999 sec_enqueue_desc(struct sec_softc *sc, struct sec_desc *desc, int channel)
1000 {
1001 u_int fflvl = SEC_MAX_FIFO_LEVEL;
1002 uint64_t reg;
1003 int i;
1004
1005 SEC_LOCK_ASSERT(sc, controller);
1006
1007 /* Find free channel if have not got one */
1008 if (channel < 0) {
1009 for (i = 0; i < SEC_CHANNELS; i++) {
1010 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1011
1012 if ((reg & sc->sc_channel_idle_mask) == 0) {
1013 channel = i;
1014 break;
1015 }
1016 }
1017 }
1018
1019 /* There is no free channel */
1020 if (channel < 0)
1021 return (-1);
1022
1023 /* Check FIFO level on selected channel */
1024 reg = SEC_READ(sc, SEC_CHAN_CSR(channel));
1025
1026 switch(sc->sc_version) {
1027 case 2:
1028 fflvl = (reg >> SEC_CHAN_CSR2_FFLVL_S) & SEC_CHAN_CSR2_FFLVL_M;
1029 break;
1030 case 3:
1031 fflvl = (reg >> SEC_CHAN_CSR3_FFLVL_S) & SEC_CHAN_CSR3_FFLVL_M;
1032 break;
1033 }
1034
1035 if (fflvl >= SEC_MAX_FIFO_LEVEL)
1036 return (-1);
1037
1038 /* Enqueue descriptor in channel */
1039 SEC_WRITE(sc, SEC_CHAN_FF(channel), desc->sd_desc_paddr);
1040
1041 return (channel);
1042 }
1043
/*
 * Move descriptors from the ready queue into hardware channels.
 *
 * Channel choice honors EU affinity: if the descriptor's execution
 * units are already bound to a channel, queue behind them; otherwise
 * take any idle channel.  Stops (putting the descriptor back) as soon
 * as one cannot be queued, preserving submission order.
 */
static void
sec_enqueue(struct sec_softc *sc)
{
	struct sec_desc *desc;
	int ch0, ch1;

	SEC_LOCK(sc, descriptors);
	SEC_LOCK(sc, controller);

	while (SEC_READY_DESC_CNT(sc) > 0) {
		desc = SEC_GET_READY_DESC(sc);

		/* Channels (if any) currently bound to this desc's EUs */
		ch0 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel0);
		ch1 = sec_eu_channel(sc, desc->sd_desc->shd_eu_sel1);

		/*
		 * Both EU are used by the same channel.
		 * Enqueue descriptor in channel used by busy EUs.
		 */
		if (ch0 >= 0 && ch0 == ch1) {
			if (sec_enqueue_desc(sc, desc, ch0) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Only one EU is free.
		 * Enqueue descriptor in channel used by busy EU.
		 */
		if ((ch0 >= 0 && ch1 < 0) || (ch1 >= 0 && ch0 < 0)) {
			if (sec_enqueue_desc(sc, desc, (ch0 >= 0) ? ch0 : ch1)
			    >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/*
		 * Both EU are free.
		 * Enqueue descriptor in first free channel.
		 */
		if (ch0 < 0 && ch1 < 0) {
			if (sec_enqueue_desc(sc, desc, -1) >= 0) {
				SEC_DESC_READY2QUEUED(sc);
				continue;
			}
		}

		/* Current descriptor can not be queued at the moment */
		SEC_PUT_BACK_READY_DESC(sc);
		break;
	}

	SEC_UNLOCK(sc, controller);
	SEC_UNLOCK(sc, descriptors);
}
1101
/*
 * Look up the software descriptor whose hardware descriptor lives at
 * bus address 'paddr' (as reported by a channel's current descriptor
 * pointer register).  Returns NULL if not found.
 *
 * NOTE(review): the loop bound is SEC_CHANNELS, but sc_desc[] holds
 * SEC_DESCRIPTORS entries (see attach) — confirm whether only the
 * first SEC_CHANNELS descriptors can ever appear in CDPR, or whether
 * the bound should be SEC_DESCRIPTORS.
 */
static struct sec_desc *
sec_find_desc(struct sec_softc *sc, bus_addr_t paddr)
{
	struct sec_desc *desc = NULL;
	int i;

	SEC_LOCK_ASSERT(sc, descriptors);

	for (i = 0; i < SEC_CHANNELS; i++) {
		if (sc->sc_desc[i].sd_desc_paddr == paddr) {
			desc = &(sc->sc_desc[i]);
			break;
		}
	}

	return (desc);
}
1119
1120 static int
1121 sec_make_pointer_direct(struct sec_softc *sc, struct sec_desc *desc, u_int n,
1122 bus_addr_t data, bus_size_t dsize)
1123 {
1124 struct sec_hw_desc_ptr *ptr;
1125
1126 SEC_LOCK_ASSERT(sc, descriptors);
1127
1128 ptr = &(desc->sd_desc->shd_pointer[n]);
1129 ptr->shdp_length = dsize;
1130 ptr->shdp_extent = 0;
1131 ptr->shdp_j = 0;
1132 ptr->shdp_ptr = data;
1133
1134 return (0);
1135 }
1136
/*
 * Fill descriptor pointer slot n with a link-table reference covering
 * 'dsize' bytes at offset 'doffset' of 'data' (flat memory, uio or
 * mbuf per 'dtype').  The buffer is DMA-mapped and link table entries
 * are built by sec_desc_map_dma(); the last entry gets its return bit
 * set and the slot points at the first entry (shdp_j = 1 selects
 * link-table mode).  Returns 0 or the mapping error.
 */
static int
sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
    u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
{
	struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
	struct sec_hw_desc_ptr *ptr;
	int error;

	SEC_LOCK_ASSERT(sc, descriptors);

	/* For flat memory map only requested region */
	if (dtype == SEC_MEMORY) {
		data = (uint8_t*)(data) + doffset;
		sdmi.sdmi_offset = 0;
	}

	error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
	    dtype, &sdmi);

	if (error)
		return (error);

	/* Mark the last link table entry so the hardware stops there */
	sdmi.sdmi_lt_last->sl_lt->shl_r = 1;
	desc->sd_lt_used += sdmi.sdmi_lt_used;

	ptr = &(desc->sd_desc->shd_pointer[n]);
	ptr->shdp_length = dsize;
	ptr->shdp_extent = 0;
	ptr->shdp_j = 1;
	ptr->shdp_ptr = sdmi.sdmi_lt_first->sl_lt_paddr;

	return (0);
}
1170
1171 static int
1172 sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
1173 struct cryptoini **mac)
1174 {
1175 struct cryptoini *e, *m;
1176
1177 e = cri;
1178 m = cri->cri_next;
1179
1180 /* We can haldle only two operations */
1181 if (m && m->cri_next)
1182 return (EINVAL);
1183
1184 if (sec_mdeu_can_handle(e->cri_alg)) {
1185 cri = m;
1186 m = e;
1187 e = cri;
1188 }
1189
1190 if (m && !sec_mdeu_can_handle(m->cri_alg))
1191 return (EINVAL);
1192
1193 *enc = e;
1194 *mac = m;
1195
1196 return (0);
1197 }
1198
1199 static int
1200 sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
1201 struct cryptodesc **mac)
1202 {
1203 struct cryptodesc *e, *m, *t;
1204
1205 e = crp->crp_desc;
1206 m = e->crd_next;
1207
1208 /* We can haldle only two operations */
1209 if (m && m->crd_next)
1210 return (EINVAL);
1211
1212 if (sec_mdeu_can_handle(e->crd_alg)) {
1213 t = m;
1214 m = e;
1215 e = t;
1216 }
1217
1218 if (m && !sec_mdeu_can_handle(m->crd_alg))
1219 return (EINVAL);
1220
1221 *enc = e;
1222 *mac = m;
1223
1224 return (0);
1225 }
1226
1227 static int
1228 sec_alloc_session(struct sec_softc *sc)
1229 {
1230 struct sec_session *ses = NULL;
1231 int sid = -1;
1232 u_int i;
1233
1234 SEC_LOCK(sc, sessions);
1235
1236 for (i = 0; i < SEC_MAX_SESSIONS; i++) {
1237 if (sc->sc_sessions[i].ss_used == 0) {
1238 ses = &(sc->sc_sessions[i]);
1239 ses->ss_used = 1;
1240 ses->ss_ivlen = 0;
1241 ses->ss_klen = 0;
1242 ses->ss_mklen = 0;
1243 sid = i;
1244 break;
1245 }
1246 }
1247
1248 SEC_UNLOCK(sc, sessions);
1249
1250 return (sid);
1251 }
1252
1253 static struct sec_session *
1254 sec_get_session(struct sec_softc *sc, u_int sid)
1255 {
1256 struct sec_session *ses;
1257
1258 if (sid >= SEC_MAX_SESSIONS)
1259 return (NULL);
1260
1261 SEC_LOCK(sc, sessions);
1262
1263 ses = &(sc->sc_sessions[sid]);
1264
1265 if (ses->ss_used == 0)
1266 ses = NULL;
1267
1268 SEC_UNLOCK(sc, sessions);
1269
1270 return (ses);
1271 }
1272
/*
 * Crypto framework newsession entry point.  Splits the request into
 * cipher/digest halves, validates key sizes, allocates a session slot,
 * probes the EU method table for an execution unit that accepts the
 * combination and stores the keys in the session.  On success returns
 * 0 and writes the session id to *sidp.
 */
static int
sec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct sec_softc *sc = device_get_softc(dev);
	struct sec_eu_methods *eu = sec_eus;
	struct cryptoini *enc = NULL;
	struct cryptoini *mac = NULL;
	struct sec_session *ses;
	int error = -1;
	int sid;

	error = sec_split_cri(cri, &enc, &mac);
	if (error)
		return (error);

	/* Check key lengths */
	if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
		return (E2BIG);

	if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
		return (E2BIG);

	/* Only SEC 3.0 supports digests larger than 256 bits */
	if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
		return (E2BIG);

	sid = sec_alloc_session(sc);
	if (sid < 0)
		return (ENOMEM);

	ses = sec_get_session(sc, sid);

	/* Find EU for this session */
	/* Probe EUs in table order; sem_newsession() < 0 means "not mine". */
	while (eu->sem_make_desc != NULL) {
		error = eu->sem_newsession(sc, ses, enc, mac);
		if (error >= 0)
			break;

		eu++;
	}

	/* If not found, return EINVAL */
	if (error < 0) {
		sec_free_session(sc, ses);
		return (EINVAL);
	}

	/* Save cipher key */
	if (enc && enc->cri_key) {
		ses->ss_klen = enc->cri_klen / 8;
		memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
	}

	/* Save digest key */
	if (mac && mac->cri_key) {
		ses->ss_mklen = mac->cri_klen / 8;
		memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
	}

	ses->ss_eu = eu;
	*sidp = sid;

	return (0);
}
1337
1338 static int
1339 sec_freesession(device_t dev, uint64_t tid)
1340 {
1341 struct sec_softc *sc = device_get_softc(dev);
1342 struct sec_session *ses;
1343 int error = 0;
1344
1345 ses = sec_get_session(sc, CRYPTO_SESID2LID(tid));
1346 if (ses == NULL)
1347 return (EINVAL);
1348
1349 sec_free_session(sc, ses);
1350
1351 return (error);
1352 }
1353
/*
 * Crypto framework process entry point: validate the request, claim a
 * free hardware descriptor, set up IV and keys, let the session's EU
 * build the descriptor and kick the enqueue machinery.  Errors are
 * reported through crp->crp_etype with a 0 return; ERESTART is
 * returned only when the driver is saturated (or shutting down) so the
 * framework re-queues the request.
 */
static int
sec_process(device_t dev, struct cryptop *crp, int hint)
{
	struct sec_softc *sc = device_get_softc(dev);
	struct sec_desc *desc = NULL;
	struct cryptodesc *mac, *enc;
	struct sec_session *ses;
	int buftype, error = 0;

	/* Check Session ID */
	ses = sec_get_session(sc, CRYPTO_SESID2LID(crp->crp_sid));
	if (ses == NULL) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return (0);
	}

	/* Check for input length */
	if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	/* Get descriptors */
	if (sec_split_crp(crp, &enc, &mac)) {
		crp->crp_etype = EINVAL;
		crypto_done(crp);
		return (0);
	}

	SEC_LOCK(sc, descriptors);
	SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Block driver if there is no free descriptors or we are going down */
	if (SEC_FREE_DESC_CNT(sc) == 0 || sc->sc_shutdown) {
		sc->sc_blocked |= CRYPTO_SYMQ;
		SEC_UNLOCK(sc, descriptors);
		return (ERESTART);
	}

	/* Prepare descriptor */
	desc = SEC_GET_FREE_DESC(sc);
	desc->sd_lt_used = 0;
	desc->sd_error = 0;
	desc->sd_crp = crp;

	/* Classify the request buffer for the DMA mapping helpers. */
	if (crp->crp_flags & CRYPTO_F_IOV)
		buftype = SEC_UIO;
	else if (crp->crp_flags & CRYPTO_F_IMBUF)
		buftype = SEC_MBUF;
	else
		buftype = SEC_MEMORY;

	/*
	 * IV handling: on encryption use the caller-supplied IV or
	 * generate a random one, writing it back into the buffer unless
	 * the caller says it is already there; on decryption take the IV
	 * from the caller or read it out of the buffer.
	 */
	if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
			    ses->ss_ivlen);
		else
			arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);

		if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, ses->ss_ivlen,
			    desc->sd_desc->shd_iv);
	} else if (enc) {
		if (enc->crd_flags & CRD_F_IV_EXPLICIT)
			memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
			    ses->ss_ivlen);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enc->crd_inject, ses->ss_ivlen,
			    desc->sd_desc->shd_iv);
	}

	/* Per-request explicit keys update the session copies first. */
	if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
		if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
			ses->ss_klen = enc->crd_klen / 8;
			memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
		} else
			error = E2BIG;
	}

	if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
		if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
			ses->ss_mklen = mac->crd_klen / 8;
			memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
		} else
			error = E2BIG;
	}

	if (!error) {
		/* Copy session keys into the hardware descriptor. */
		memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
		memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);

		error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
	}

	if (error) {
		/* Undo the descriptor claim and report the error. */
		SEC_DESC_FREE_POINTERS(desc);
		SEC_DESC_PUT_BACK_LT(sc, desc);
		SEC_PUT_BACK_FREE_DESC(sc);
		SEC_UNLOCK(sc, descriptors);
		crp->crp_etype = error;
		crypto_done(crp);
		return (0);
	}

	/*
	 * Skip DONE interrupt if this is not last request in burst, but only
	 * if we are running on SEC 3.X. On SEC 2.X we have to enable DONE
	 * signaling on each descriptor.
	 */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_version == 3)
		desc->sd_desc->shd_dn = 0;
	else
		desc->sd_desc->shd_dn = 1;

	SEC_DESC_SYNC(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_POSTREAD |
	    BUS_DMASYNC_POSTWRITE);
	SEC_DESC_FREE2READY(sc);
	SEC_UNLOCK(sc, descriptors);

	/* Enqueue ready descriptors in hardware */
	sec_enqueue(sc);

	return (0);
}
1483
1484 static int
1485 sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
1486 struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
1487 int buftype)
1488 {
1489 struct sec_hw_desc *hd = desc->sd_desc;
1490 int error;
1491
1492 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1493 hd->shd_eu_sel1 = SEC_EU_NONE;
1494 hd->shd_mode1 = 0;
1495
1496 /* Pointer 0: NULL */
1497 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1498 if (error)
1499 return (error);
1500
1501 /* Pointer 1: IV IN */
1502 error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
1503 offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
1504 if (error)
1505 return (error);
1506
1507 /* Pointer 2: Cipher Key */
1508 error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
1509 offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
1510 if (error)
1511 return (error);
1512
1513 /* Pointer 3: Data IN */
1514 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
1515 enc->crd_len, buftype);
1516 if (error)
1517 return (error);
1518
1519 /* Pointer 4: Data OUT */
1520 error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
1521 enc->crd_len, buftype);
1522 if (error)
1523 return (error);
1524
1525 /* Pointer 5: IV OUT (Not used: NULL) */
1526 error = sec_make_pointer_direct(sc, desc, 5, 0, 0);
1527 if (error)
1528 return (error);
1529
1530 /* Pointer 6: NULL */
1531 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1532
1533 return (error);
1534 }
1535
/*
 * Build an HMAC_SNOOP hardware descriptor for combined cipher+HMAC
 * requests.  The MDEU "snoops" the cipher data stream, so the HMAC
 * region must fully cover the cipher region and both must end at the
 * same offset.
 */
static int
sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
    struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
    struct cryptodesc *mac, int buftype)
{
	struct sec_hw_desc *hd = desc->sd_desc;
	u_int eu, mode, hashlen;
	int error;

	/* HMAC region must span at least the ciphered region ... */
	if (mac->crd_len < enc->crd_len)
		return (EINVAL);

	/* ... and both regions must end at the same offset. */
	if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
		return (EINVAL);

	error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
	if (error)
		return (error);

	hd->shd_desc_type = SEC_DT_HMAC_SNOOP;
	hd->shd_eu_sel1 = eu;
	hd->shd_mode1 = mode;

	/* Pointer 0: HMAC Key */
	error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
	    offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
	if (error)
		return (error);

	/* Pointer 1: HMAC-Only Data IN (prefix not covered by the cipher) */
	error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
	    mac->crd_len - enc->crd_len, buftype);
	if (error)
		return (error);

	/* Pointer 2: Cipher Key */
	error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
	    offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
	if (error)
		return (error);

	/* Pointer 3: IV IN */
	error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
	    offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
	if (error)
		return (error);

	/* Pointer 4: Data IN */
	error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
	    enc->crd_len, buftype);
	if (error)
		return (error);

	/* Pointer 5: Data OUT */
	error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
	    enc->crd_len, buftype);
	if (error)
		return (error);

	/* Pointer 6: HMAC OUT */
	error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
	    hashlen, buftype);

	return (error);
}
1601
1602 /* AESU */
1603
1604 static int
1605 sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
1606 struct cryptoini *enc, struct cryptoini *mac)
1607 {
1608
1609 if (enc == NULL)
1610 return (-1);
1611
1612 if (enc->cri_alg != CRYPTO_AES_CBC)
1613 return (-1);
1614
1615 ses->ss_ivlen = AES_BLOCK_LEN;
1616
1617 return (0);
1618 }
1619
/*
 * Build a hardware descriptor for an AES-CBC request, optionally with
 * an HMAC snooped by the MDEU (when a digest operation is chained).
 */
static int
sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
    struct sec_desc *desc, struct cryptop *crp, int buftype)
{
	struct sec_hw_desc *hd = desc->sd_desc;
	struct cryptodesc *enc, *mac;
	int error;

	error = sec_split_crp(crp, &enc, &mac);
	if (error)
		return (error);

	if (!enc)
		return (EINVAL);

	hd->shd_eu_sel0 = SEC_EU_AESU;
	hd->shd_mode0 = SEC_AESU_MODE_CBC;

	if (enc->crd_alg != CRYPTO_AES_CBC)
		return (EINVAL);

	/* ED mode bit selects encryption; shd_dir flags the direction. */
	if (enc->crd_flags & CRD_F_ENCRYPT) {
		hd->shd_mode0 |= SEC_AESU_MODE_ED;
		hd->shd_dir = 0;
	} else
		hd->shd_dir = 1;

	/* Snooping (HMAC) descriptor when a digest is chained. */
	if (mac)
		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
		    buftype);
	else
		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
		    buftype);

	return (error);
}
1656
1657 /* DEU */
1658
1659 static int
1660 sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
1661 struct cryptoini *enc, struct cryptoini *mac)
1662 {
1663
1664 if (enc == NULL)
1665 return (-1);
1666
1667 switch (enc->cri_alg) {
1668 case CRYPTO_DES_CBC:
1669 case CRYPTO_3DES_CBC:
1670 break;
1671 default:
1672 return (-1);
1673 }
1674
1675 ses->ss_ivlen = DES_BLOCK_LEN;
1676
1677 return (0);
1678 }
1679
/*
 * Build a hardware descriptor for a DES/3DES-CBC request, optionally
 * with an HMAC snooped by the MDEU (when a digest operation is
 * chained).
 */
static int
sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
    struct sec_desc *desc, struct cryptop *crp, int buftype)
{
	struct sec_hw_desc *hd = desc->sd_desc;
	struct cryptodesc *enc, *mac;
	int error;

	error = sec_split_crp(crp, &enc, &mac);
	if (error)
		return (error);

	if (!enc)
		return (EINVAL);

	hd->shd_eu_sel0 = SEC_EU_DEU;
	hd->shd_mode0 = SEC_DEU_MODE_CBC;

	switch (enc->crd_alg) {
	case CRYPTO_3DES_CBC:
		/* TS (triple-DES) mode bit on top of CBC. */
		hd->shd_mode0 |= SEC_DEU_MODE_TS;
		break;
	case CRYPTO_DES_CBC:
		break;
	default:
		return (EINVAL);
	}

	/* ED mode bit selects encryption; shd_dir flags the direction. */
	if (enc->crd_flags & CRD_F_ENCRYPT) {
		hd->shd_mode0 |= SEC_DEU_MODE_ED;
		hd->shd_dir = 0;
	} else
		hd->shd_dir = 1;

	/* Snooping (HMAC) descriptor when a digest is chained. */
	if (mac)
		error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
		    buftype);
	else
		error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
		    buftype);

	return (error);
}
1723
1724 /* MDEU */
1725
1726 static int
1727 sec_mdeu_can_handle(u_int alg)
1728 {
1729 switch (alg) {
1730 case CRYPTO_MD5:
1731 case CRYPTO_SHA1:
1732 case CRYPTO_MD5_HMAC:
1733 case CRYPTO_SHA1_HMAC:
1734 case CRYPTO_SHA2_256_HMAC:
1735 case CRYPTO_SHA2_384_HMAC:
1736 case CRYPTO_SHA2_512_HMAC:
1737 return (1);
1738 default:
1739 return (0);
1740 }
1741 }
1742
/*
 * Map a digest request onto MDEU settings: execution unit (MDEU A or
 * B), mode bits and the hash output length.  HMAC variants always
 * yield the fixed SEC_HMAC_HASH_LEN output; plain (non-HMAC) SHA-2
 * digests are not supported.
 */
static int
sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
{

	*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
	*eu = SEC_EU_NONE;

	switch (crd->crd_alg) {
	case CRYPTO_MD5_HMAC:
		*mode |= SEC_MDEU_MODE_HMAC;
		/* FALLTHROUGH */
	case CRYPTO_MD5:
		*eu = SEC_EU_MDEU_A;
		*mode |= SEC_MDEU_MODE_MD5;
		*hashlen = MD5_HASH_LEN;
		break;
	case CRYPTO_SHA1_HMAC:
		*mode |= SEC_MDEU_MODE_HMAC;
		/* FALLTHROUGH */
	case CRYPTO_SHA1:
		*eu = SEC_EU_MDEU_A;
		*mode |= SEC_MDEU_MODE_SHA1;
		*hashlen = SHA1_HASH_LEN;
		break;
	case CRYPTO_SHA2_256_HMAC:
		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA256;
		*eu = SEC_EU_MDEU_A;
		break;
	case CRYPTO_SHA2_384_HMAC:
		/* SHA-384/512 need the larger MDEU B unit. */
		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA384;
		*eu = SEC_EU_MDEU_B;
		break;
	case CRYPTO_SHA2_512_HMAC:
		*mode |= SEC_MDEU_MODE_HMAC | SEC_MDEU_MODE_SHA512;
		*eu = SEC_EU_MDEU_B;
		break;
	default:
		return (EINVAL);
	}

	/* HMAC output is truncated to a fixed length. */
	if (*mode & SEC_MDEU_MODE_HMAC)
		*hashlen = SEC_HMAC_HASH_LEN;

	return (0);
}
1788
1789 static int
1790 sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
1791 struct cryptoini *enc, struct cryptoini *mac)
1792 {
1793
1794 if (mac && sec_mdeu_can_handle(mac->cri_alg))
1795 return (0);
1796
1797 return (-1);
1798 }
1799
1800 static int
1801 sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
1802 struct sec_desc *desc, struct cryptop *crp, int buftype)
1803 {
1804 struct cryptodesc *enc, *mac;
1805 struct sec_hw_desc *hd = desc->sd_desc;
1806 u_int eu, mode, hashlen;
1807 int error;
1808
1809 error = sec_split_crp(crp, &enc, &mac);
1810 if (error)
1811 return (error);
1812
1813 if (enc)
1814 return (EINVAL);
1815
1816 error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
1817 if (error)
1818 return (error);
1819
1820 hd->shd_desc_type = SEC_DT_COMMON_NONSNOOP;
1821 hd->shd_eu_sel0 = eu;
1822 hd->shd_mode0 = mode;
1823 hd->shd_eu_sel1 = SEC_EU_NONE;
1824 hd->shd_mode1 = 0;
1825
1826 /* Pointer 0: NULL */
1827 error = sec_make_pointer_direct(sc, desc, 0, 0, 0);
1828 if (error)
1829 return (error);
1830
1831 /* Pointer 1: Context In (Not used: NULL) */
1832 error = sec_make_pointer_direct(sc, desc, 1, 0, 0);
1833 if (error)
1834 return (error);
1835
1836 /* Pointer 2: HMAC Key (or NULL, depending on digest type) */
1837 if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
1838 error = sec_make_pointer_direct(sc, desc, 2,
1839 desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
1840 shd_mkey), ses->ss_mklen);
1841 else
1842 error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
1843
1844 if (error)
1845 return (error);
1846
1847 /* Pointer 3: Input Data */
1848 error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
1849 mac->crd_len, buftype);
1850 if (error)
1851 return (error);
1852
1853 /* Pointer 4: NULL */
1854 error = sec_make_pointer_direct(sc, desc, 4, 0, 0);
1855 if (error)
1856 return (error);
1857
1858 /* Pointer 5: Hash out */
1859 error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
1860 mac->crd_inject, hashlen, buftype);
1861 if (error)
1862 return (error);
1863
1864 /* Pointer 6: NULL */
1865 error = sec_make_pointer_direct(sc, desc, 6, 0, 0);
1866
1867 return (0);
1868 }
Cache object: 7c5e1197009328d720612d9151169115
|