FreeBSD/Linux Kernel Cross Reference
sys/dev/safe/safe.c
1 /*-
2 * Copyright (c) 2003 Sam Leffler, Errno Consulting
3 * Copyright (c) 2003 Global Technology Associates, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/5.3/sys/dev/safe/safe.c 129879 2004-05-30 20:08:47Z phk $");
30
31 /*
32 * SafeNet SafeXcel-1141 hardware crypto accelerator
33 */
34 #include "opt_safe.h"
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/malloc.h>
41 #include <sys/kernel.h>
42 #include <sys/mbuf.h>
43 #include <sys/module.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/sysctl.h>
47 #include <sys/endian.h>
48
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51
52 #include <machine/clock.h>
53 #include <machine/bus.h>
54 #include <machine/resource.h>
55 #include <sys/bus.h>
56 #include <sys/rman.h>
57
58 #include <crypto/sha1.h>
59 #include <opencrypto/cryptodev.h>
60 #include <opencrypto/cryptosoft.h>
61 #include <sys/md5.h>
62 #include <sys/random.h>
63
64 #include <dev/pci/pcivar.h>
65 #include <dev/pci/pcireg.h>
66
67 #ifdef SAFE_RNDTEST
68 #include <dev/rndtest/rndtest.h>
69 #endif
70 #include <dev/safe/safereg.h>
71 #include <dev/safe/safevar.h>
72
73 #ifndef bswap32
74 #define bswap32(x) ntohl(x)
75 #endif
76
77 /*
78 * Prototypes and count for the pci_device structure
79 */
80 static int safe_probe(device_t);
81 static int safe_attach(device_t);
82 static int safe_detach(device_t);
83 static int safe_suspend(device_t);
84 static int safe_resume(device_t);
85 static void safe_shutdown(device_t);
86
87 static device_method_t safe_methods[] = {
88 /* Device interface */
89 DEVMETHOD(device_probe, safe_probe),
90 DEVMETHOD(device_attach, safe_attach),
91 DEVMETHOD(device_detach, safe_detach),
92 DEVMETHOD(device_suspend, safe_suspend),
93 DEVMETHOD(device_resume, safe_resume),
94 DEVMETHOD(device_shutdown, safe_shutdown),
95
96 /* bus interface */
97 DEVMETHOD(bus_print_child, bus_generic_print_child),
98 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
99
100 { 0, 0 }
101 };
102 static driver_t safe_driver = {
103 "safe",
104 safe_methods,
105 sizeof (struct safe_softc)
106 };
107 static devclass_t safe_devclass;
108
109 DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
110 MODULE_DEPEND(safe, crypto, 1, 1, 1);
111 #ifdef SAFE_RNDTEST
112 MODULE_DEPEND(safe, rndtest, 1, 1, 1);
113 #endif
114
115 static void safe_intr(void *);
116 static int safe_newsession(void *, u_int32_t *, struct cryptoini *);
117 static int safe_freesession(void *, u_int64_t);
118 static int safe_process(void *, struct cryptop *, int);
119 static void safe_callback(struct safe_softc *, struct safe_ringentry *);
120 static void safe_feed(struct safe_softc *, struct safe_ringentry *);
121 static void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
122 #ifndef SAFE_NO_RNG
123 static void safe_rng_init(struct safe_softc *);
124 static void safe_rng(void *);
125 #endif /* SAFE_NO_RNG */
126 static int safe_dma_malloc(struct safe_softc *, bus_size_t,
127 struct safe_dma_alloc *, int);
128 #define safe_dma_sync(_dma, _flags) \
129 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
130 static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
131 static int safe_dmamap_aligned(const struct safe_operand *);
132 static int safe_dmamap_uniform(const struct safe_operand *);
133
134 static void safe_reset_board(struct safe_softc *);
135 static void safe_init_board(struct safe_softc *);
136 static void safe_init_pciregs(device_t dev);
137 static void safe_cleanchip(struct safe_softc *);
138 static void safe_totalreset(struct safe_softc *);
139
140 static int safe_free_entry(struct safe_softc *, struct safe_ringentry *);
141
142 SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0, "SafeNet driver parameters");
143
144 #ifdef SAFE_DEBUG
145 static void safe_dump_dmastatus(struct safe_softc *, const char *);
146 static void safe_dump_ringstate(struct safe_softc *, const char *);
147 static void safe_dump_intrstate(struct safe_softc *, const char *);
148 static void safe_dump_request(struct safe_softc *, const char *,
149 struct safe_ringentry *);
150
151 static struct safe_softc *safec; /* for use by hw.safe.dump */
152
153 static int safe_debug = 0;
154 SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
155 0, "control debugging msgs");
156 #define DPRINTF(_x) if (safe_debug) printf _x
157 #else
158 #define DPRINTF(_x)
159 #endif
160
161 #define READ_REG(sc,r) \
162 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
163
164 #define WRITE_REG(sc,reg,val) \
165 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)
166
167 struct safe_stats safestats;
168 SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
169 safe_stats, "driver statistics");
170 #ifndef SAFE_NO_RNG
171 static int safe_rnginterval = 1; /* poll once a second */
172 SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
173 0, "RNG polling interval (secs)");
174 static int safe_rngbufsize = 16; /* 64 bytes each poll */
175 SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
176 0, "RNG polling buffer size (32-bit words)");
177 static int safe_rngmaxalarm = 8; /* max alarms before reset */
178 SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
179 0, "RNG max alarms before reset");
180 #endif /* SAFE_NO_RNG */
181
182 static int
183 safe_probe(device_t dev)
184 {
185 if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
186 pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
187 return (0);
188 return (ENXIO);
189 }
190
191 static const char*
192 safe_partname(struct safe_softc *sc)
193 {
194 /* XXX sprintf numbers when not decoded */
195 switch (pci_get_vendor(sc->sc_dev)) {
196 case PCI_VENDOR_SAFENET:
197 switch (pci_get_device(sc->sc_dev)) {
198 case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
199 }
200 return "SafeNet unknown-part";
201 }
202 return "Unknown-vendor unknown-part";
203 }
204
205 #ifndef SAFE_NO_RNG
206 static void
207 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
208 {
209 random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
210 }
211 #endif /* SAFE_NO_RNG */
212
213 static int
214 safe_attach(device_t dev)
215 {
216 struct safe_softc *sc = device_get_softc(dev);
217 u_int32_t raddr;
218 u_int32_t cmd, i, devinfo;
219 int rid;
220
221 bzero(sc, sizeof (*sc));
222 sc->sc_dev = dev;
223
224 /* XXX handle power management */
225
226 cmd = pci_read_config(dev, PCIR_COMMAND, 4);
227 cmd |= PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN;
228 pci_write_config(dev, PCIR_COMMAND, cmd, 4);
229 cmd = pci_read_config(dev, PCIR_COMMAND, 4);
230
231 if (!(cmd & PCIM_CMD_MEMEN)) {
232 device_printf(dev, "failed to enable memory mapping\n");
233 goto bad;
234 }
235
236 if (!(cmd & PCIM_CMD_BUSMASTEREN)) {
237 device_printf(dev, "failed to enable bus mastering\n");
238 goto bad;
239 }
240
241 /*
242 * Setup memory-mapping of PCI registers.
243 */
244 rid = BS_BAR;
245 sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
246 RF_ACTIVE);
247 if (sc->sc_sr == NULL) {
248 device_printf(dev, "cannot map register space\n");
249 goto bad;
250 }
251 sc->sc_st = rman_get_bustag(sc->sc_sr);
252 sc->sc_sh = rman_get_bushandle(sc->sc_sr);
253
254 /*
255 * Arrange interrupt line.
256 */
257 rid = 0;
258 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
259 RF_SHAREABLE|RF_ACTIVE);
260 if (sc->sc_irq == NULL) {
261 device_printf(dev, "could not map interrupt\n");
262 goto bad1;
263 }
264 /*
265 * NB: Network code assumes we are blocked with splimp()
266 * so make sure the IRQ is mapped appropriately.
267 */
268 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
269 safe_intr, sc, &sc->sc_ih)) {
270 device_printf(dev, "could not establish interrupt\n");
271 goto bad2;
272 }
273
274 sc->sc_cid = crypto_get_driverid(0);
275 if (sc->sc_cid < 0) {
276 device_printf(dev, "could not get crypto driver id\n");
277 goto bad3;
278 }
279
280 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
281 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
282
283 /*
284 * Setup DMA descriptor area.
285 */
286 if (bus_dma_tag_create(NULL, /* parent */
287 1, /* alignment */
288 SAFE_DMA_BOUNDARY, /* boundary */
289 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
290 BUS_SPACE_MAXADDR, /* highaddr */
291 NULL, NULL, /* filter, filterarg */
292 SAFE_MAX_DMA, /* maxsize */
293 SAFE_MAX_PART, /* nsegments */
294 SAFE_MAX_SSIZE, /* maxsegsize */
295 BUS_DMA_ALLOCNOW, /* flags */
296 NULL, NULL, /* locking */
297 &sc->sc_srcdmat)) {
298 device_printf(dev, "cannot allocate DMA tag\n");
299 goto bad4;
300 }
301 if (bus_dma_tag_create(NULL, /* parent */
302 sizeof(u_int32_t), /* alignment */
303 SAFE_MAX_DSIZE, /* boundary */
304 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
305 BUS_SPACE_MAXADDR, /* highaddr */
306 NULL, NULL, /* filter, filterarg */
307 SAFE_MAX_DMA, /* maxsize */
308 SAFE_MAX_PART, /* nsegments */
309 SAFE_MAX_DSIZE, /* maxsegsize */
310 BUS_DMA_ALLOCNOW, /* flags */
311 NULL, NULL, /* locking */
312 &sc->sc_dstdmat)) {
313 device_printf(dev, "cannot allocate DMA tag\n");
314 goto bad4;
315 }
316
317 /*
318 * Allocate packet engine descriptors.
319 */
320 if (safe_dma_malloc(sc,
321 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
322 &sc->sc_ringalloc, 0)) {
323 device_printf(dev, "cannot allocate PE descriptor ring\n");
324 bus_dma_tag_destroy(sc->sc_srcdmat);
325 goto bad4;
326 }
327 /*
328 * Hookup the static portion of all our data structures.
329 */
330 sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
331 sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
332 sc->sc_front = sc->sc_ring;
333 sc->sc_back = sc->sc_ring;
334 raddr = sc->sc_ringalloc.dma_paddr;
335 bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
336 for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
337 struct safe_ringentry *re = &sc->sc_ring[i];
338
339 re->re_desc.d_sa = raddr +
340 offsetof(struct safe_ringentry, re_sa);
341 re->re_sa.sa_staterec = raddr +
342 offsetof(struct safe_ringentry, re_sastate);
343
344 raddr += sizeof (struct safe_ringentry);
345 }
346 mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
347 "packet engine ring", MTX_DEF);
348
349 /*
350 * Allocate scatter and gather particle descriptors.
351 */
352 if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
353 &sc->sc_spalloc, 0)) {
354 device_printf(dev, "cannot allocate source particle "
355 "descriptor ring\n");
356 mtx_destroy(&sc->sc_ringmtx);
357 safe_dma_free(sc, &sc->sc_ringalloc);
358 bus_dma_tag_destroy(sc->sc_srcdmat);
359 goto bad4;
360 }
361 sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
362 sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
363 sc->sc_spfree = sc->sc_spring;
364 bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
365
366 if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
367 &sc->sc_dpalloc, 0)) {
368 device_printf(dev, "cannot allocate destination particle "
369 "descriptor ring\n");
370 mtx_destroy(&sc->sc_ringmtx);
371 safe_dma_free(sc, &sc->sc_spalloc);
372 safe_dma_free(sc, &sc->sc_ringalloc);
373 bus_dma_tag_destroy(sc->sc_dstdmat);
374 goto bad4;
375 }
376 sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
377 sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
378 sc->sc_dpfree = sc->sc_dpring;
379 bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
380
381 device_printf(sc->sc_dev, "%s", safe_partname(sc));
382
383 devinfo = READ_REG(sc, SAFE_DEVINFO);
384 if (devinfo & SAFE_DEVINFO_RNG) {
385 sc->sc_flags |= SAFE_FLAGS_RNG;
386 printf(" rng");
387 }
388 if (devinfo & SAFE_DEVINFO_PKEY) {
389 #if 0
390 printf(" key");
391 sc->sc_flags |= SAFE_FLAGS_KEY;
392 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0,
393 safe_kprocess, sc);
394 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0,
395 safe_kprocess, sc);
396 #endif
397 }
398 if (devinfo & SAFE_DEVINFO_DES) {
399 printf(" des/3des");
400 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
401 safe_newsession, safe_freesession, safe_process, sc);
402 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
403 safe_newsession, safe_freesession, safe_process, sc);
404 }
405 if (devinfo & SAFE_DEVINFO_AES) {
406 printf(" aes");
407 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
408 safe_newsession, safe_freesession, safe_process, sc);
409 }
410 if (devinfo & SAFE_DEVINFO_MD5) {
411 printf(" md5");
412 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
413 safe_newsession, safe_freesession, safe_process, sc);
414 }
415 if (devinfo & SAFE_DEVINFO_SHA1) {
416 printf(" sha1");
417 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
418 safe_newsession, safe_freesession, safe_process, sc);
419 }
420 printf(" null");
421 crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0,
422 safe_newsession, safe_freesession, safe_process, sc);
423 crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0,
424 safe_newsession, safe_freesession, safe_process, sc);
425 /* XXX other supported algorithms */
426 printf("\n");
427
428 safe_reset_board(sc); /* reset h/w */
429 safe_init_pciregs(dev); /* init pci settings */
430 safe_init_board(sc); /* init h/w */
431
432 #ifndef SAFE_NO_RNG
433 if (sc->sc_flags & SAFE_FLAGS_RNG) {
434 #ifdef SAFE_RNDTEST
435 sc->sc_rndtest = rndtest_attach(dev);
436 if (sc->sc_rndtest)
437 sc->sc_harvest = rndtest_harvest;
438 else
439 sc->sc_harvest = default_harvest;
440 #else
441 sc->sc_harvest = default_harvest;
442 #endif
443 safe_rng_init(sc);
444
445 callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
446 callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
447 }
448 #endif /* SAFE_NO_RNG */
449 #ifdef SAFE_DEBUG
450 safec = sc; /* for use by hw.safe.dump */
451 #endif
452 return (0);
453 bad4:
454 crypto_unregister_all(sc->sc_cid);
455 bad3:
456 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
457 bad2:
458 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
459 bad1:
460 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
461 bad:
462 return (ENXIO);
463 }
464
465 /*
466 * Detach a device that successfully probed.
467 */
468 static int
469 safe_detach(device_t dev)
470 {
471 struct safe_softc *sc = device_get_softc(dev);
472
473 /* XXX wait/abort active ops */
474
475 WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
476
477 callout_stop(&sc->sc_rngto);
478
479 crypto_unregister_all(sc->sc_cid);
480
481 #ifdef SAFE_RNDTEST
482 if (sc->sc_rndtest)
483 rndtest_detach(sc->sc_rndtest);
484 #endif
485
486 safe_cleanchip(sc);
487 safe_dma_free(sc, &sc->sc_dpalloc);
488 safe_dma_free(sc, &sc->sc_spalloc);
489 mtx_destroy(&sc->sc_ringmtx);
490 safe_dma_free(sc, &sc->sc_ringalloc);
491
492 bus_generic_detach(dev);
493 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
494 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
495
496 bus_dma_tag_destroy(sc->sc_srcdmat);
497 bus_dma_tag_destroy(sc->sc_dstdmat);
498 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
499
500 return (0);
501 }
502
503 /*
504 * Stop all chip i/o so that the kernel's probe routines don't
505 * get confused by errant DMAs when rebooting.
506 */
507 static void
508 safe_shutdown(device_t dev)
509 {
510 #ifdef notyet
511 safe_stop(device_get_softc(dev));
512 #endif
513 }
514
515 /*
516 * Device suspend routine.
517 */
518 static int
519 safe_suspend(device_t dev)
520 {
521 struct safe_softc *sc = device_get_softc(dev);
522
523 #ifdef notyet
524 /* XXX stop the device and save PCI settings */
525 #endif
526 sc->sc_suspended = 1;
527
528 return (0);
529 }
530
531 static int
532 safe_resume(device_t dev)
533 {
534 struct safe_softc *sc = device_get_softc(dev);
535
536 #ifdef notyet
537 	/* XXX restore PCI settings and start the device */
538 #endif
539 sc->sc_suspended = 0;
540 return (0);
541 }
542
543 /*
544 * SafeXcel Interrupt routine
545 */
546 static void
547 safe_intr(void *arg)
548 {
549 struct safe_softc *sc = arg;
550 volatile u_int32_t stat;
551
552 stat = READ_REG(sc, SAFE_HM_STAT);
553 if (stat == 0) /* shared irq, not for us */
554 return;
555
556 WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
557
558 if ((stat & SAFE_INT_PE_DDONE)) {
559 /*
560 * Descriptor(s) done; scan the ring and
561 * process completed operations.
562 */
563 mtx_lock(&sc->sc_ringmtx);
564 while (sc->sc_back != sc->sc_front) {
565 struct safe_ringentry *re = sc->sc_back;
566 #ifdef SAFE_DEBUG
567 if (safe_debug) {
568 safe_dump_ringstate(sc, __func__);
569 safe_dump_request(sc, __func__, re);
570 }
571 #endif
572 /*
573 * safe_process marks ring entries that were allocated
574 			 * but not used with a csr of zero.  This ensures the
575 * ring front pointer never needs to be set backwards
576 * in the event that an entry is allocated but not used
577 * because of a setup error.
578 */
579 if (re->re_desc.d_csr != 0) {
580 if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
581 break;
582 if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
583 break;
584 sc->sc_nqchip--;
585 safe_callback(sc, re);
586 }
587 if (++(sc->sc_back) == sc->sc_ringtop)
588 sc->sc_back = sc->sc_ring;
589 }
590 mtx_unlock(&sc->sc_ringmtx);
591 }
592
593 /*
594 * Check to see if we got any DMA Error
595 */
596 if (stat & SAFE_INT_PE_ERROR) {
597 DPRINTF(("dmaerr dmastat %08x\n",
598 READ_REG(sc, SAFE_PE_DMASTAT)));
599 safestats.st_dmaerr++;
600 safe_totalreset(sc);
601 #if 0
602 safe_feed(sc);
603 #endif
604 }
605
606 if (sc->sc_needwakeup) { /* XXX check high watermark */
607 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
608 DPRINTF(("%s: wakeup crypto %x\n", __func__,
609 sc->sc_needwakeup));
610 sc->sc_needwakeup &= ~wakeup;
611 crypto_unblock(sc->sc_cid, wakeup);
612 }
613 }
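The handler above drains completions by walking sc_back toward sc_front and wrapping both cursors at sc_ringtop. Below is a minimal userland sketch of that single-producer/single-consumer ring discipline; the names and the `done` flag are illustrative stand-ins (the driver checks the DONE bits in d_csr/d_len instead).

    #define RING_SIZE 4

    struct entry {
            int done;                       /* stand-in for the DONE bits */
    };

    struct ring {
            struct entry slots[RING_SIZE];
            struct entry *front;            /* producer: next free slot */
            struct entry *back;             /* consumer: oldest queued slot */
            struct entry *top;              /* one past the last slot */
    };

    /* Drain completed entries, stopping at the first one still in flight. */
    static void
    ring_drain(struct ring *r)
    {
            while (r->back != r->front) {
                    if (!r->back->done)     /* hardware still working */
                            break;
                    /* ... complete the request, cf. safe_callback ... */
                    if (++(r->back) == r->top)
                            r->back = r->slots;     /* wrap around */
            }
    }

    int
    main(void)
    {
            struct ring r;

            r.front = r.back = r.slots;
            r.top = r.slots + RING_SIZE;
            ring_drain(&r);                 /* empty ring: nothing to do */
            return (0);
    }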
614
615 /*
616 * safe_feed() - post a request to chip
617 */
618 static void
619 safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
620 {
621 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
622 if (re->re_dst_map != NULL)
623 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
624 BUS_DMASYNC_PREREAD);
625 /* XXX have no smaller granularity */
626 safe_dma_sync(&sc->sc_ringalloc,
627 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
628 safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
629 safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);
630
631 #ifdef SAFE_DEBUG
632 if (safe_debug) {
633 safe_dump_ringstate(sc, __func__);
634 safe_dump_request(sc, __func__, re);
635 }
636 #endif
637 sc->sc_nqchip++;
638 if (sc->sc_nqchip > safestats.st_maxqchip)
639 safestats.st_maxqchip = sc->sc_nqchip;
640 /* poke h/w to check descriptor ring, any value can be written */
641 WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
642 }
643
644 /*
645 * Allocate a new 'session' and return an encoded session id. 'sidp'
646 * contains our registration id, and should contain an encoded session
647 * id on successful allocation.
648 */
649 static int
650 safe_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
651 {
652 #define N(a) (sizeof(a) / sizeof (a[0]))
653 struct cryptoini *c, *encini = NULL, *macini = NULL;
654 struct safe_softc *sc = arg;
655 struct safe_session *ses = NULL;
656 MD5_CTX md5ctx;
657 SHA1_CTX sha1ctx;
658 int i, sesn;
659
660 if (sidp == NULL || cri == NULL || sc == NULL)
661 return (EINVAL);
662
663 for (c = cri; c != NULL; c = c->cri_next) {
664 if (c->cri_alg == CRYPTO_MD5_HMAC ||
665 c->cri_alg == CRYPTO_SHA1_HMAC ||
666 c->cri_alg == CRYPTO_NULL_HMAC) {
667 if (macini)
668 return (EINVAL);
669 macini = c;
670 } else if (c->cri_alg == CRYPTO_DES_CBC ||
671 c->cri_alg == CRYPTO_3DES_CBC ||
672 c->cri_alg == CRYPTO_AES_CBC ||
673 c->cri_alg == CRYPTO_NULL_CBC) {
674 if (encini)
675 return (EINVAL);
676 encini = c;
677 } else
678 return (EINVAL);
679 }
680 if (encini == NULL && macini == NULL)
681 return (EINVAL);
682 if (encini) { /* validate key length */
683 switch (encini->cri_alg) {
684 case CRYPTO_DES_CBC:
685 if (encini->cri_klen != 64)
686 return (EINVAL);
687 break;
688 case CRYPTO_3DES_CBC:
689 if (encini->cri_klen != 192)
690 return (EINVAL);
691 break;
692 case CRYPTO_AES_CBC:
693 if (encini->cri_klen != 128 &&
694 encini->cri_klen != 192 &&
695 encini->cri_klen != 256)
696 return (EINVAL);
697 break;
698 }
699 }
700
701 if (sc->sc_sessions == NULL) {
702 ses = sc->sc_sessions = (struct safe_session *)malloc(
703 sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
704 if (ses == NULL)
705 return (ENOMEM);
706 sesn = 0;
707 sc->sc_nsessions = 1;
708 } else {
709 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
710 if (sc->sc_sessions[sesn].ses_used == 0) {
711 ses = &sc->sc_sessions[sesn];
712 break;
713 }
714 }
715
716 if (ses == NULL) {
717 sesn = sc->sc_nsessions;
718 ses = (struct safe_session *)malloc((sesn + 1) *
719 sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
720 if (ses == NULL)
721 return (ENOMEM);
722 bcopy(sc->sc_sessions, ses, sesn *
723 sizeof(struct safe_session));
724 bzero(sc->sc_sessions, sesn *
725 sizeof(struct safe_session));
726 free(sc->sc_sessions, M_DEVBUF);
727 sc->sc_sessions = ses;
728 ses = &sc->sc_sessions[sesn];
729 sc->sc_nsessions++;
730 }
731 }
732
733 bzero(ses, sizeof(struct safe_session));
734 ses->ses_used = 1;
735
736 if (encini) {
737 /* get an IV */
738 /* XXX may read fewer than requested */
739 read_random(ses->ses_iv, sizeof(ses->ses_iv));
740
741 ses->ses_klen = encini->cri_klen;
742 bcopy(encini->cri_key, ses->ses_key, ses->ses_klen / 8);
743
744 		/* PE is little-endian, ensure proper byte order */
745 for (i = 0; i < N(ses->ses_key); i++)
746 ses->ses_key[i] = htole32(ses->ses_key[i]);
747 }
748
749 if (macini) {
750 for (i = 0; i < macini->cri_klen / 8; i++)
751 macini->cri_key[i] ^= HMAC_IPAD_VAL;
752
753 if (macini->cri_alg == CRYPTO_MD5_HMAC) {
754 MD5Init(&md5ctx);
755 MD5Update(&md5ctx, macini->cri_key,
756 macini->cri_klen / 8);
757 MD5Update(&md5ctx, hmac_ipad_buffer,
758 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
759 bcopy(md5ctx.state, ses->ses_hminner,
760 sizeof(md5ctx.state));
761 } else {
762 SHA1Init(&sha1ctx);
763 SHA1Update(&sha1ctx, macini->cri_key,
764 macini->cri_klen / 8);
765 SHA1Update(&sha1ctx, hmac_ipad_buffer,
766 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
767 bcopy(sha1ctx.h.b32, ses->ses_hminner,
768 sizeof(sha1ctx.h.b32));
769 }
770
771 for (i = 0; i < macini->cri_klen / 8; i++)
772 macini->cri_key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
773
774 if (macini->cri_alg == CRYPTO_MD5_HMAC) {
775 MD5Init(&md5ctx);
776 MD5Update(&md5ctx, macini->cri_key,
777 macini->cri_klen / 8);
778 MD5Update(&md5ctx, hmac_opad_buffer,
779 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
780 bcopy(md5ctx.state, ses->ses_hmouter,
781 sizeof(md5ctx.state));
782 } else {
783 SHA1Init(&sha1ctx);
784 SHA1Update(&sha1ctx, macini->cri_key,
785 macini->cri_klen / 8);
786 SHA1Update(&sha1ctx, hmac_opad_buffer,
787 HMAC_BLOCK_LEN - (macini->cri_klen / 8));
788 bcopy(sha1ctx.h.b32, ses->ses_hmouter,
789 sizeof(sha1ctx.h.b32));
790 }
791
792 for (i = 0; i < macini->cri_klen / 8; i++)
793 macini->cri_key[i] ^= HMAC_OPAD_VAL;
794
795 		/* PE is little-endian, ensure proper byte order */
796 for (i = 0; i < N(ses->ses_hminner); i++) {
797 ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
798 ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
799 }
800 }
801
802 *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
803 return (0);
804 #undef N
805 }
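The ipad/opad manipulation above is the standard HMAC key schedule from RFC 2104: hash one block of key^ipad and one block of key^opad, and save only the resulting chaining variables so the hardware can resume from those midstates. Here is a compact sketch of the same precomputation using OpenSSL's MD5 (an assumption purely for illustration; the driver instead snapshots the raw context state words into ses_hminner/ses_hmouter):

    #include <string.h>
    #include <openssl/md5.h>

    #define HMAC_BLOCK  64          /* MD5/SHA-1 block size in bytes */
    #define IPAD_VAL    0x36
    #define OPAD_VAL    0x5c

    /* Precompute HMAC inner/outer midstates; assumes klen <= HMAC_BLOCK. */
    static void
    hmac_md5_midstates(const unsigned char *key, size_t klen,
        MD5_CTX *inner, MD5_CTX *outer)
    {
            unsigned char pad[HMAC_BLOCK];
            size_t i;

            memset(pad, IPAD_VAL, sizeof(pad));
            for (i = 0; i < klen; i++)
                    pad[i] ^= key[i];       /* key ^ ipad */
            MD5_Init(inner);
            MD5_Update(inner, pad, sizeof(pad));

            memset(pad, OPAD_VAL, sizeof(pad));
            for (i = 0; i < klen; i++)
                    pad[i] ^= key[i];       /* key ^ opad */
            MD5_Init(outer);
            MD5_Update(outer, pad, sizeof(pad));
            /* inner/outer now hold the midstates after one block each */
    }

The driver gets the same effect by XOR-ing the key buffer in place and hashing key followed by the remainder of the pad buffer in two Update calls.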
806
807 /*
808 * Deallocate a session.
809 */
810 static int
811 safe_freesession(void *arg, u_int64_t tid)
812 {
813 struct safe_softc *sc = arg;
814 int session, ret;
815 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
816
817 if (sc == NULL)
818 return (EINVAL);
819
820 session = SAFE_SESSION(sid);
821 if (session < sc->sc_nsessions) {
822 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
823 ret = 0;
824 } else
825 ret = EINVAL;
826 return (ret);
827 }
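SAFE_SID and SAFE_SESSION come from safevar.h and are not shown here; together they pack and recover a (device unit, session index) pair in the 32-bit sid. The macros below are a hypothetical layout used only to show the round-trip property that safe_newsession, safe_freesession, and safe_process rely on:

    #include <assert.h>
    #include <stdint.h>

    /* hypothetical layout: unit in the high half, session index low */
    #define SID(unit, sesn) ((((uint32_t)(unit)) << 16) | ((sesn) & 0xffff))
    #define SESSION(sid)    ((sid) & 0xffff)

    int
    main(void)
    {
            uint32_t sid = SID(1, 42);      /* as safe_newsession returns */

            assert(SESSION(sid) == 42);     /* as safe_freesession recovers */
            return (0);
    }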
828
829 static void
830 safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
831 {
832 struct safe_operand *op = arg;
833
834 DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__,
835 (u_int) mapsize, nsegs, error));
836 if (error != 0)
837 return;
838 op->mapsize = mapsize;
839 op->nsegs = nsegs;
840 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
841 }
842
843 static int
844 safe_process(void *arg, struct cryptop *crp, int hint)
845 {
846 int err = 0, i, nicealign, uniform;
847 struct safe_softc *sc = arg;
848 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
849 int bypass, oplen, ivsize;
850 caddr_t iv;
851 int16_t coffset;
852 struct safe_session *ses;
853 struct safe_ringentry *re;
854 struct safe_sarec *sa;
855 struct safe_pdesc *pd;
856 u_int32_t cmd0, cmd1, staterec;
857
858 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
859 safestats.st_invalid++;
860 return (EINVAL);
861 }
862 if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
863 safestats.st_badsession++;
864 return (EINVAL);
865 }
866
867 mtx_lock(&sc->sc_ringmtx);
868 if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
869 safestats.st_ringfull++;
870 sc->sc_needwakeup |= CRYPTO_SYMQ;
871 mtx_unlock(&sc->sc_ringmtx);
872 return (ERESTART);
873 }
874 re = sc->sc_front;
875
876 staterec = re->re_sa.sa_staterec; /* save */
877 /* NB: zero everything but the PE descriptor */
878 bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
879 re->re_sa.sa_staterec = staterec; /* restore */
880
881 re->re_crp = crp;
882 re->re_sesn = SAFE_SESSION(crp->crp_sid);
883
884 if (crp->crp_flags & CRYPTO_F_IMBUF) {
885 re->re_src_m = (struct mbuf *)crp->crp_buf;
886 re->re_dst_m = (struct mbuf *)crp->crp_buf;
887 } else if (crp->crp_flags & CRYPTO_F_IOV) {
888 re->re_src_io = (struct uio *)crp->crp_buf;
889 re->re_dst_io = (struct uio *)crp->crp_buf;
890 } else {
891 safestats.st_badflags++;
892 err = EINVAL;
893 goto errout; /* XXX we don't handle contiguous blocks! */
894 }
895
896 sa = &re->re_sa;
897 ses = &sc->sc_sessions[re->re_sesn];
898
899 crd1 = crp->crp_desc;
900 if (crd1 == NULL) {
901 safestats.st_nodesc++;
902 err = EINVAL;
903 goto errout;
904 }
905 crd2 = crd1->crd_next;
906
907 cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
908 cmd1 = 0;
909 if (crd2 == NULL) {
910 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
911 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
912 crd1->crd_alg == CRYPTO_NULL_HMAC) {
913 maccrd = crd1;
914 enccrd = NULL;
915 cmd0 |= SAFE_SA_CMD0_OP_HASH;
916 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
917 crd1->crd_alg == CRYPTO_3DES_CBC ||
918 crd1->crd_alg == CRYPTO_AES_CBC ||
919 crd1->crd_alg == CRYPTO_NULL_CBC) {
920 maccrd = NULL;
921 enccrd = crd1;
922 cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
923 } else {
924 safestats.st_badalg++;
925 err = EINVAL;
926 goto errout;
927 }
928 } else {
929 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
930 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
931 crd1->crd_alg == CRYPTO_NULL_HMAC) &&
932 (crd2->crd_alg == CRYPTO_DES_CBC ||
933 crd2->crd_alg == CRYPTO_3DES_CBC ||
934 crd2->crd_alg == CRYPTO_AES_CBC ||
935 crd2->crd_alg == CRYPTO_NULL_CBC) &&
936 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
937 maccrd = crd1;
938 enccrd = crd2;
939 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
940 crd1->crd_alg == CRYPTO_3DES_CBC ||
941 crd1->crd_alg == CRYPTO_AES_CBC ||
942 crd1->crd_alg == CRYPTO_NULL_CBC) &&
943 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
944 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
945 crd2->crd_alg == CRYPTO_NULL_HMAC) &&
946 (crd1->crd_flags & CRD_F_ENCRYPT)) {
947 enccrd = crd1;
948 maccrd = crd2;
949 } else {
950 safestats.st_badalg++;
951 err = EINVAL;
952 goto errout;
953 }
954 cmd0 |= SAFE_SA_CMD0_OP_BOTH;
955 }
956
957 if (enccrd) {
958 if (enccrd->crd_alg == CRYPTO_DES_CBC) {
959 cmd0 |= SAFE_SA_CMD0_DES;
960 cmd1 |= SAFE_SA_CMD1_CBC;
961 ivsize = 2*sizeof(u_int32_t);
962 } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
963 cmd0 |= SAFE_SA_CMD0_3DES;
964 cmd1 |= SAFE_SA_CMD1_CBC;
965 ivsize = 2*sizeof(u_int32_t);
966 } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
967 cmd0 |= SAFE_SA_CMD0_AES;
968 cmd1 |= SAFE_SA_CMD1_CBC;
969 if (ses->ses_klen == 128)
970 cmd1 |= SAFE_SA_CMD1_AES128;
971 else if (ses->ses_klen == 192)
972 cmd1 |= SAFE_SA_CMD1_AES192;
973 else
974 cmd1 |= SAFE_SA_CMD1_AES256;
975 ivsize = 4*sizeof(u_int32_t);
976 } else {
977 cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
978 ivsize = 0;
979 }
980
981 /*
982 * Setup encrypt/decrypt state. When using basic ops
983 * we can't use an inline IV because hash/crypt offset
984 * must be from the end of the IV to the start of the
985 * crypt data and this leaves out the preceding header
986 * from the hash calculation. Instead we place the IV
987 * in the state record and set the hash/crypt offset to
988 * copy both the header+IV.
989 */
990 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
991 cmd0 |= SAFE_SA_CMD0_OUTBOUND;
992
993 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
994 iv = enccrd->crd_iv;
995 else
996 iv = (caddr_t) ses->ses_iv;
997 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
998 if (crp->crp_flags & CRYPTO_F_IMBUF)
999 m_copyback(re->re_src_m,
1000 enccrd->crd_inject, ivsize, iv);
1001 else if (crp->crp_flags & CRYPTO_F_IOV)
1002 cuio_copyback(re->re_src_io,
1003 enccrd->crd_inject, ivsize, iv);
1004 }
1005 bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
1006 cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
1007 re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
1008 } else {
1009 cmd0 |= SAFE_SA_CMD0_INBOUND;
1010
1011 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1012 bcopy(enccrd->crd_iv,
1013 re->re_sastate.sa_saved_iv, ivsize);
1014 else if (crp->crp_flags & CRYPTO_F_IMBUF)
1015 m_copydata(re->re_src_m, enccrd->crd_inject,
1016 ivsize,
1017 (caddr_t)re->re_sastate.sa_saved_iv);
1018 else if (crp->crp_flags & CRYPTO_F_IOV)
1019 cuio_copydata(re->re_src_io, enccrd->crd_inject,
1020 ivsize,
1021 (caddr_t)re->re_sastate.sa_saved_iv);
1022 cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
1023 }
1024 /*
1025 * For basic encryption use the zero pad algorithm.
1026 * This pads results to an 8-byte boundary and
1027 * suppresses padding verification for inbound (i.e.
1028 * decrypt) operations.
1029 *
1030 * NB: Not sure if the 8-byte pad boundary is a problem.
1031 */
1032 cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
1033
1034 /* XXX assert key bufs have the same size */
1035 bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
1036 }
1037
1038 if (maccrd) {
1039 if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
1040 cmd0 |= SAFE_SA_CMD0_MD5;
1041 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
1042 } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
1043 cmd0 |= SAFE_SA_CMD0_SHA1;
1044 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
1045 } else {
1046 cmd0 |= SAFE_SA_CMD0_HASH_NULL;
1047 }
1048 /*
1049 * Digest data is loaded from the SA and the hash
1050 * result is saved to the state block where we
1051 * retrieve it for return to the caller.
1052 */
1053 /* XXX assert digest bufs have the same size */
1054 bcopy(ses->ses_hminner, sa->sa_indigest,
1055 sizeof(sa->sa_indigest));
1056 bcopy(ses->ses_hmouter, sa->sa_outdigest,
1057 sizeof(sa->sa_outdigest));
1058
1059 cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
1060 re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
1061 }
1062
1063 if (enccrd && maccrd) {
1064 /*
1065 * The offset from hash data to the start of
1066 * crypt data is the difference in the skips.
1067 */
1068 bypass = maccrd->crd_skip;
1069 coffset = enccrd->crd_skip - maccrd->crd_skip;
1070 if (coffset < 0) {
1071 DPRINTF(("%s: hash does not precede crypt; "
1072 "mac skip %u enc skip %u\n",
1073 __func__, maccrd->crd_skip, enccrd->crd_skip));
1074 safestats.st_skipmismatch++;
1075 err = EINVAL;
1076 goto errout;
1077 }
1078 oplen = enccrd->crd_skip + enccrd->crd_len;
1079 if (maccrd->crd_skip + maccrd->crd_len != oplen) {
1080 DPRINTF(("%s: hash amount %u != crypt amount %u\n",
1081 __func__, maccrd->crd_skip + maccrd->crd_len,
1082 oplen));
1083 safestats.st_lenmismatch++;
1084 err = EINVAL;
1085 goto errout;
1086 }
1087 #ifdef SAFE_DEBUG
1088 if (safe_debug) {
1089 printf("mac: skip %d, len %d, inject %d\n",
1090 maccrd->crd_skip, maccrd->crd_len,
1091 maccrd->crd_inject);
1092 printf("enc: skip %d, len %d, inject %d\n",
1093 enccrd->crd_skip, enccrd->crd_len,
1094 enccrd->crd_inject);
1095 printf("bypass %d coffset %d oplen %d\n",
1096 bypass, coffset, oplen);
1097 }
1098 #endif
1099 if (coffset & 3) { /* offset must be 32-bit aligned */
1100 DPRINTF(("%s: coffset %u misaligned\n",
1101 __func__, coffset));
1102 safestats.st_coffmisaligned++;
1103 err = EINVAL;
1104 goto errout;
1105 }
1106 coffset >>= 2;
1107 if (coffset > 255) { /* offset must be <256 dwords */
1108 DPRINTF(("%s: coffset %u too big\n",
1109 __func__, coffset));
1110 safestats.st_cofftoobig++;
1111 err = EINVAL;
1112 goto errout;
1113 }
1114 /*
1115 * Tell the hardware to copy the header to the output.
1116 * The header is defined as the data from the end of
1117 * the bypass to the start of data to be encrypted.
1118 * Typically this is the inline IV. Note that you need
1119 * to do this even if src+dst are the same; it appears
1120 * that w/o this bit the crypted data is written
1121 * immediately after the bypass data.
1122 */
1123 cmd1 |= SAFE_SA_CMD1_HDRCOPY;
1124 /*
1125 * Disable IP header mutable bit handling. This is
1126 * needed to get correct HMAC calculations.
1127 */
1128 cmd1 |= SAFE_SA_CMD1_MUTABLE;
1129 } else {
1130 if (enccrd) {
1131 bypass = enccrd->crd_skip;
1132 oplen = bypass + enccrd->crd_len;
1133 } else {
1134 bypass = maccrd->crd_skip;
1135 oplen = bypass + maccrd->crd_len;
1136 }
1137 coffset = 0;
1138 }
1139 /* XXX verify multiple of 4 when using s/g */
1140 if (bypass > 96) { /* bypass offset must be <= 96 bytes */
1141 DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
1142 safestats.st_bypasstoobig++;
1143 err = EINVAL;
1144 goto errout;
1145 }
1146
1147 if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
1148 safestats.st_nomap++;
1149 err = ENOMEM;
1150 goto errout;
1151 }
1152 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1153 if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map,
1154 re->re_src_m, safe_op_cb,
1155 &re->re_src, BUS_DMA_NOWAIT) != 0) {
1156 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1157 re->re_src_map = NULL;
1158 safestats.st_noload++;
1159 err = ENOMEM;
1160 goto errout;
1161 }
1162 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1163 if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map,
1164 re->re_src_io, safe_op_cb,
1165 &re->re_src, BUS_DMA_NOWAIT) != 0) {
1166 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1167 re->re_src_map = NULL;
1168 safestats.st_noload++;
1169 err = ENOMEM;
1170 goto errout;
1171 }
1172 }
1173 nicealign = safe_dmamap_aligned(&re->re_src);
1174 uniform = safe_dmamap_uniform(&re->re_src);
1175
1176 DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
1177 nicealign, uniform, re->re_src.nsegs));
1178 if (re->re_src.nsegs > 1) {
1179 re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
1180 ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
1181 for (i = 0; i < re->re_src_nsegs; i++) {
1182 /* NB: no need to check if there's space */
1183 pd = sc->sc_spfree;
1184 if (++(sc->sc_spfree) == sc->sc_springtop)
1185 sc->sc_spfree = sc->sc_spring;
1186
1187 KASSERT((pd->pd_flags&3) == 0 ||
1188 (pd->pd_flags&3) == SAFE_PD_DONE,
1189 ("bogus source particle descriptor; flags %x",
1190 pd->pd_flags));
1191 pd->pd_addr = re->re_src_segs[i].ds_addr;
1192 pd->pd_size = re->re_src_segs[i].ds_len;
1193 pd->pd_flags = SAFE_PD_READY;
1194 }
1195 cmd0 |= SAFE_SA_CMD0_IGATHER;
1196 } else {
1197 /*
1198 * No need for gather, reference the operand directly.
1199 */
1200 re->re_desc.d_src = re->re_src_segs[0].ds_addr;
1201 }
1202
1203 if (enccrd == NULL && maccrd != NULL) {
1204 /*
1205 * Hash op; no destination needed.
1206 */
1207 } else {
1208 if (crp->crp_flags & CRYPTO_F_IOV) {
1209 if (!nicealign) {
1210 safestats.st_iovmisaligned++;
1211 err = EINVAL;
1212 goto errout;
1213 }
1214 if (uniform != 1) {
1215 /*
1216 * Source is not suitable for direct use as
1217 * the destination. Create a new scatter/gather
1218 * list based on the destination requirements
1219 * and check if that's ok.
1220 */
1221 if (bus_dmamap_create(sc->sc_dstdmat,
1222 BUS_DMA_NOWAIT, &re->re_dst_map)) {
1223 safestats.st_nomap++;
1224 err = ENOMEM;
1225 goto errout;
1226 }
1227 if (bus_dmamap_load_uio(sc->sc_dstdmat,
1228 re->re_dst_map, re->re_dst_io,
1229 safe_op_cb, &re->re_dst,
1230 BUS_DMA_NOWAIT) != 0) {
1231 bus_dmamap_destroy(sc->sc_dstdmat,
1232 re->re_dst_map);
1233 re->re_dst_map = NULL;
1234 safestats.st_noload++;
1235 err = ENOMEM;
1236 goto errout;
1237 }
1238 uniform = safe_dmamap_uniform(&re->re_dst);
1239 if (!uniform) {
1240 /*
1241 * There's no way to handle the DMA
1242 * requirements with this uio. We
1243 * could create a separate DMA area for
1244 * the result and then copy it back,
1245 * but for now we just bail and return
1246 * an error. Note that uio requests
1247 * > SAFE_MAX_DSIZE are handled because
1248 * the DMA map and segment list for the
1249 				 * destination will result in a
1250 * destination particle list that does
1251 * the necessary scatter DMA.
1252 */
1253 safestats.st_iovnotuniform++;
1254 err = EINVAL;
1255 goto errout;
1256 }
1257 } else
1258 re->re_dst = re->re_src;
1259 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1260 if (nicealign && uniform == 1) {
1261 /*
1262 * Source layout is suitable for direct
1263 * sharing of the DMA map and segment list.
1264 */
1265 re->re_dst = re->re_src;
1266 } else if (nicealign && uniform == 2) {
1267 /*
1268 * The source is properly aligned but requires a
1269 * different particle list to handle DMA of the
1270 * result. Create a new map and do the load to
1271 * create the segment list. The particle
1272 * descriptor setup code below will handle the
1273 * rest.
1274 */
1275 if (bus_dmamap_create(sc->sc_dstdmat,
1276 BUS_DMA_NOWAIT, &re->re_dst_map)) {
1277 safestats.st_nomap++;
1278 err = ENOMEM;
1279 goto errout;
1280 }
1281 if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
1282 re->re_dst_map, re->re_dst_m,
1283 safe_op_cb, &re->re_dst,
1284 BUS_DMA_NOWAIT) != 0) {
1285 bus_dmamap_destroy(sc->sc_dstdmat,
1286 re->re_dst_map);
1287 re->re_dst_map = NULL;
1288 safestats.st_noload++;
1289 err = ENOMEM;
1290 goto errout;
1291 }
1292 } else { /* !(aligned and/or uniform) */
1293 int totlen, len;
1294 struct mbuf *m, *top, **mp;
1295
1296 /*
1297 * DMA constraints require that we allocate a
1298 * new mbuf chain for the destination. We
1299 * allocate an entire new set of mbufs of
1300 * optimal/required size and then tell the
1301 * hardware to copy any bits that are not
1302 * created as a byproduct of the operation.
1303 */
1304 if (!nicealign)
1305 safestats.st_unaligned++;
1306 if (!uniform)
1307 safestats.st_notuniform++;
1308 totlen = re->re_src_mapsize;
1309 if (re->re_src_m->m_flags & M_PKTHDR) {
1310 len = MHLEN;
1311 MGETHDR(m, M_DONTWAIT, MT_DATA);
1312 if (m && !m_dup_pkthdr(m, re->re_src_m,
1313 M_DONTWAIT)) {
1314 m_free(m);
1315 m = NULL;
1316 }
1317 } else {
1318 len = MLEN;
1319 MGET(m, M_DONTWAIT, MT_DATA);
1320 }
1321 if (m == NULL) {
1322 safestats.st_nombuf++;
1323 err = sc->sc_nqchip ? ERESTART : ENOMEM;
1324 goto errout;
1325 }
1326 if (totlen >= MINCLSIZE) {
1327 MCLGET(m, M_DONTWAIT);
1328 if ((m->m_flags & M_EXT) == 0) {
1329 m_free(m);
1330 safestats.st_nomcl++;
1331 err = sc->sc_nqchip ?
1332 ERESTART : ENOMEM;
1333 goto errout;
1334 }
1335 len = MCLBYTES;
1336 }
1337 m->m_len = len;
1338 top = NULL;
1339 mp = ⊤
1340
1341 while (totlen > 0) {
1342 if (top) {
1343 MGET(m, M_DONTWAIT, MT_DATA);
1344 if (m == NULL) {
1345 m_freem(top);
1346 safestats.st_nombuf++;
1347 err = sc->sc_nqchip ?
1348 ERESTART : ENOMEM;
1349 goto errout;
1350 }
1351 len = MLEN;
1352 }
1353 if (top && totlen >= MINCLSIZE) {
1354 MCLGET(m, M_DONTWAIT);
1355 if ((m->m_flags & M_EXT) == 0) {
1356 *mp = m;
1357 m_freem(top);
1358 safestats.st_nomcl++;
1359 err = sc->sc_nqchip ?
1360 ERESTART : ENOMEM;
1361 goto errout;
1362 }
1363 len = MCLBYTES;
1364 }
1365 m->m_len = len = min(totlen, len);
1366 totlen -= len;
1367 *mp = m;
1368 mp = &m->m_next;
1369 }
1370 re->re_dst_m = top;
1371 if (bus_dmamap_create(sc->sc_dstdmat,
1372 BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
1373 safestats.st_nomap++;
1374 err = ENOMEM;
1375 goto errout;
1376 }
1377 if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
1378 re->re_dst_map, re->re_dst_m,
1379 safe_op_cb, &re->re_dst,
1380 BUS_DMA_NOWAIT) != 0) {
1381 bus_dmamap_destroy(sc->sc_dstdmat,
1382 re->re_dst_map);
1383 re->re_dst_map = NULL;
1384 safestats.st_noload++;
1385 err = ENOMEM;
1386 goto errout;
1387 }
1388 if (re->re_src.mapsize > oplen) {
1389 /*
1390 * There's data following what the
1391 * hardware will copy for us. If this
1392 * isn't just the ICV (that's going to
1393 * be written on completion), copy it
1394 * to the new mbufs
1395 */
1396 if (!(maccrd &&
1397 (re->re_src.mapsize-oplen) == 12 &&
1398 maccrd->crd_inject == oplen))
1399 safe_mcopy(re->re_src_m,
1400 re->re_dst_m,
1401 oplen);
1402 else
1403 safestats.st_noicvcopy++;
1404 }
1405 }
1406 } else {
1407 safestats.st_badflags++;
1408 err = EINVAL;
1409 goto errout;
1410 }
1411
1412 if (re->re_dst.nsegs > 1) {
1413 re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
1414 ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
1415 for (i = 0; i < re->re_dst_nsegs; i++) {
1416 pd = sc->sc_dpfree;
1417 KASSERT((pd->pd_flags&3) == 0 ||
1418 (pd->pd_flags&3) == SAFE_PD_DONE,
1419 ("bogus dest particle descriptor; flags %x",
1420 pd->pd_flags));
1421 if (++(sc->sc_dpfree) == sc->sc_dpringtop)
1422 sc->sc_dpfree = sc->sc_dpring;
1423 pd->pd_addr = re->re_dst_segs[i].ds_addr;
1424 pd->pd_flags = SAFE_PD_READY;
1425 }
1426 cmd0 |= SAFE_SA_CMD0_OSCATTER;
1427 } else {
1428 /*
1429 * No need for scatter, reference the operand directly.
1430 */
1431 re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
1432 }
1433 }
1434
1435 /*
1436 * All done with setup; fillin the SA command words
1437 * and the packet engine descriptor. The operation
1438 * is now ready for submission to the hardware.
1439 */
1440 sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
1441 sa->sa_cmd1 = cmd1
1442 | (coffset << SAFE_SA_CMD1_OFFSET_S)
1443 | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
1444 | SAFE_SA_CMD1_SRPCI
1445 ;
1446 /*
1447 * NB: the order of writes is important here. In case the
1448 * chip is scanning the ring because of an outstanding request
1449 * it might nab this one too. In that case we need to make
1450 * sure the setup is complete before we write the length
1451 * field of the descriptor as it signals the descriptor is
1452 * ready for processing.
1453 */
1454 re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
1455 if (maccrd)
1456 re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
1457 re->re_desc.d_len = oplen
1458 | SAFE_PE_LEN_READY
1459 | (bypass << SAFE_PE_LEN_BYPASS_S)
1460 ;
1461
1462 safestats.st_ipackets++;
1463 safestats.st_ibytes += oplen;
1464
1465 if (++(sc->sc_front) == sc->sc_ringtop)
1466 sc->sc_front = sc->sc_ring;
1467
1468 /* XXX honor batching */
1469 safe_feed(sc, re);
1470 mtx_unlock(&sc->sc_ringmtx);
1471 return (0);
1472
1473 errout:
1474 if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
1475 m_freem(re->re_dst_m);
1476
1477 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1478 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1479 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1480 }
1481 if (re->re_src_map != NULL) {
1482 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1483 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1484 }
1485 mtx_unlock(&sc->sc_ringmtx);
1486 if (err != ERESTART) {
1487 crp->crp_etype = err;
1488 crypto_done(crp);
1489 } else {
1490 sc->sc_needwakeup |= CRYPTO_SYMQ;
1491 }
1492 return (err);
1493 }
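For a combined MAC+crypt request, the code above boils the two descriptors down to three engine parameters: bypass (bytes the engine skips entirely), coffset (distance from the start of hashed data to the start of encrypted data, converted to 32-bit words), and oplen (the common end of both regions). A worked example with assumed ESP-like offsets:

    #include <stdio.h>

    int
    main(void)
    {
            unsigned mac_skip = 20, mac_len = 72;   /* assumed values */
            unsigned enc_skip = 36, enc_len = 56;

            unsigned bypass  = mac_skip;            /* 20 bytes skipped */
            int      coffset = enc_skip - mac_skip; /* 16 bytes = 4 dwords */
            unsigned oplen   = enc_skip + enc_len;  /* 92 */

            /* driver requires: hash and crypt regions end together,
               coffset 32-bit aligned and < 256 dwords, bypass <= 96 */
            if (mac_skip + mac_len != oplen || (coffset & 3) || bypass > 96)
                    return (1);
            printf("bypass %u coffset %d dwords oplen %u\n",
                bypass, coffset >> 2, oplen);
            return (0);
    }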
1494
1495 static void
1496 safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
1497 {
1498 struct cryptop *crp = (struct cryptop *)re->re_crp;
1499 struct cryptodesc *crd;
1500
1501 safestats.st_opackets++;
1502 safestats.st_obytes += re->re_dst.mapsize;
1503
1504 safe_dma_sync(&sc->sc_ringalloc,
1505 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1506 if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
1507 device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
1508 re->re_desc.d_csr,
1509 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
1510 safestats.st_peoperr++;
1511 crp->crp_etype = EIO; /* something more meaningful? */
1512 }
1513 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1514 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
1515 BUS_DMASYNC_POSTREAD);
1516 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1517 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1518 }
1519 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
1520 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1521 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1522
1523 /*
1524 	 * If result was written to a different mbuf chain, swap
1525 * it in as the return value and reclaim the original.
1526 */
1527 if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
1528 m_freem(re->re_src_m);
1529 crp->crp_buf = (caddr_t)re->re_dst_m;
1530 }
1531
1532 if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
1533 /* copy out IV for future use */
1534 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1535 int ivsize;
1536
1537 if (crd->crd_alg == CRYPTO_DES_CBC ||
1538 crd->crd_alg == CRYPTO_3DES_CBC) {
1539 ivsize = 2*sizeof(u_int32_t);
1540 } else if (crd->crd_alg == CRYPTO_AES_CBC) {
1541 ivsize = 4*sizeof(u_int32_t);
1542 } else
1543 continue;
1544 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1545 m_copydata((struct mbuf *)crp->crp_buf,
1546 crd->crd_skip + crd->crd_len - ivsize,
1547 ivsize,
1548 (caddr_t) sc->sc_sessions[re->re_sesn].ses_iv);
1549 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1550 cuio_copydata((struct uio *)crp->crp_buf,
1551 crd->crd_skip + crd->crd_len - ivsize,
1552 ivsize,
1553 (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
1554 }
1555 break;
1556 }
1557 }
1558
1559 if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
1560 /* copy out ICV result */
1561 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1562 if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
1563 crd->crd_alg == CRYPTO_SHA1_HMAC ||
1564 crd->crd_alg == CRYPTO_NULL_HMAC))
1565 continue;
1566 if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
1567 /*
1568 * SHA-1 ICV's are byte-swapped; fix 'em up
1569 				 * before copying them to their destination.
1570 				 */
1571 				re->re_sastate.sa_saved_indigest[0] = bswap32(re->re_sastate.sa_saved_indigest[0]);
1572 				re->re_sastate.sa_saved_indigest[1] = bswap32(re->re_sastate.sa_saved_indigest[1]);
1573 				re->re_sastate.sa_saved_indigest[2] = bswap32(re->re_sastate.sa_saved_indigest[2]);
1574 }
1575 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1576 m_copyback((struct mbuf *)crp->crp_buf,
1577 crd->crd_inject, 12,
1578 (caddr_t)re->re_sastate.sa_saved_indigest);
1579 } else if (crp->crp_flags & CRYPTO_F_IOV && crp->crp_mac) {
1580 bcopy((caddr_t)re->re_sastate.sa_saved_indigest,
1581 crp->crp_mac, 12);
1582 }
1583 break;
1584 }
1585 }
1586 crypto_done(crp);
1587 }
1588
1589 /*
1590 * Copy all data past offset from srcm to dstm.
1591 */
1592 static void
1593 safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
1594 {
1595 u_int j, dlen, slen;
1596 caddr_t dptr, sptr;
1597
1598 /*
1599 * Advance src and dst to offset.
1600 */
1601 j = offset;
1602 while (j >= 0) {
1603 if (srcm->m_len > j)
1604 break;
1605 j -= srcm->m_len;
1606 srcm = srcm->m_next;
1607 if (srcm == NULL)
1608 return;
1609 }
1610 sptr = mtod(srcm, caddr_t) + j;
1611 slen = srcm->m_len - j;
1612
1613 j = offset;
1614 while (j >= 0) {
1615 if (dstm->m_len > j)
1616 break;
1617 j -= dstm->m_len;
1618 dstm = dstm->m_next;
1619 if (dstm == NULL)
1620 return;
1621 }
1622 dptr = mtod(dstm, caddr_t) + j;
1623 dlen = dstm->m_len - j;
1624
1625 /*
1626 * Copy everything that remains.
1627 */
1628 for (;;) {
1629 j = min(slen, dlen);
1630 bcopy(sptr, dptr, j);
1631 if (slen == j) {
1632 srcm = srcm->m_next;
1633 if (srcm == NULL)
1634 return;
1635 sptr = srcm->m_data;
1636 slen = srcm->m_len;
1637 } else
1638 sptr += j, slen -= j;
1639 if (dlen == j) {
1640 dstm = dstm->m_next;
1641 if (dstm == NULL)
1642 return;
1643 dptr = dstm->m_data;
1644 dlen = dstm->m_len;
1645 } else
1646 dptr += j, dlen -= j;
1647 }
1648 }
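safe_mcopy is a classic two-cursor merge over the two mbuf chains: each step copies min(bytes left in this source mbuf, bytes left in this destination mbuf) and advances whichever side ran dry. The same discipline over flat segment arrays, as a userland sketch (struct seg stands in for an mbuf):

    #include <stdio.h>
    #include <string.h>

    struct seg {                    /* stand-in for one mbuf */
            char    *ptr;
            size_t   len;
    };

    static void
    chain_copy(const struct seg *src, int nsrc, struct seg *dst, int ndst)
    {
            size_t soff = 0, doff = 0, n;
            int si = 0, di = 0;

            while (si < nsrc && di < ndst) {
                    n = src[si].len - soff;
                    if (dst[di].len - doff < n)
                            n = dst[di].len - doff; /* min(slen, dlen) */
                    memcpy(dst[di].ptr + doff, src[si].ptr + soff, n);
                    soff += n; doff += n;
                    if (soff == src[si].len) { si++; soff = 0; }
                    if (doff == dst[di].len) { di++; doff = 0; }
            }
    }

    int
    main(void)
    {
            char a[] = "abc", b[] = "defg", d1[5] = {0}, d2[3] = {0};
            struct seg src[] = { { a, 3 }, { b, 4 } };
            struct seg dst[] = { { d1, 4 }, { d2, 3 } };

            chain_copy(src, 2, dst, 2);
            printf("%.4s%.3s\n", d1, d2);   /* prints "abcdefg" */
            return (0);
    }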
1649
1650 #ifndef SAFE_NO_RNG
1651 #define SAFE_RNG_MAXWAIT 1000
1652
1653 static void
1654 safe_rng_init(struct safe_softc *sc)
1655 {
1656 u_int32_t w, v;
1657 int i;
1658
1659 WRITE_REG(sc, SAFE_RNG_CTRL, 0);
1660 /* use default value according to the manual */
1661 WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
1662 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1663
1664 /*
1665 	 * There is a bug in rev 1.0 of the 1140: when the RNG
1666 	 * is brought out of reset, the ready status flag does not
1667 	 * work until the RNG has finished its internal initialization.
1668 	 *
1669 	 * So, to determine that the device is through its
1670 	 * initialization, we read the data register (using the
1671 	 * status register during the read in case the RNG is already
1672 	 * initialized), then read the data register until its value
1673 	 * changes from the first read.  Once it changes, read the data
1674 	 * register until it changes again; at that point the RNG is
1675 	 * considered initialized.  This can take 750-1000 ms.
1676 */
1677 i = 0;
1678 w = READ_REG(sc, SAFE_RNG_OUT);
1679 do {
1680 v = READ_REG(sc, SAFE_RNG_OUT);
1681 if (v != w) {
1682 w = v;
1683 break;
1684 }
1685 DELAY(10);
1686 } while (++i < SAFE_RNG_MAXWAIT);
1687
1688 	/* Wait until the data changes again */
1689 i = 0;
1690 do {
1691 v = READ_REG(sc, SAFE_RNG_OUT);
1692 if (v != w)
1693 break;
1694 DELAY(10);
1695 } while (++i < SAFE_RNG_MAXWAIT);
1696 }
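The loop structure above implements a "value changed twice" heuristic. A userland sketch of the same idea, with rand() standing in for the RNG output register (an assumption purely for illustration):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAXWAIT 1000

    static unsigned
    read_rng(void)
    {
            return ((unsigned) rand());     /* stand-in for SAFE_RNG_OUT */
    }

    int
    main(void)
    {
            unsigned w = read_rng(), v;
            int i, changes = 0;

            for (i = 0; i < MAXWAIT && changes < 2; i++) {
                    v = read_rng();
                    if (v != w) {           /* output moved; count it */
                            w = v;
                            changes++;
                    }
            }
            printf("rng %s after %d polls\n",
                changes == 2 ? "ready" : "timed out", i);
            return (0);
    }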
1697
1698 static __inline void
1699 safe_rng_disable_short_cycle(struct safe_softc *sc)
1700 {
1701 WRITE_REG(sc, SAFE_RNG_CTRL,
1702 READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
1703 }
1704
1705 static __inline void
1706 safe_rng_enable_short_cycle(struct safe_softc *sc)
1707 {
1708 WRITE_REG(sc, SAFE_RNG_CTRL,
1709 READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
1710 }
1711
1712 static __inline u_int32_t
1713 safe_rng_read(struct safe_softc *sc)
1714 {
1715 int i;
1716
1717 i = 0;
1718 while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
1719 ;
1720 return READ_REG(sc, SAFE_RNG_OUT);
1721 }
1722
1723 static void
1724 safe_rng(void *arg)
1725 {
1726 struct safe_softc *sc = arg;
1727 u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */
1728 u_int maxwords;
1729 int i;
1730
1731 safestats.st_rng++;
1732 /*
1733 * Fetch the next block of data.
1734 */
1735 maxwords = safe_rngbufsize;
1736 if (maxwords > SAFE_RNG_MAXBUFSIZ)
1737 maxwords = SAFE_RNG_MAXBUFSIZ;
1738 retry:
1739 for (i = 0; i < maxwords; i++)
1740 buf[i] = safe_rng_read(sc);
1741 /*
1742 * Check the comparator alarm count and reset the h/w if
1743 * it exceeds our threshold. This guards against the
1744 * hardware oscillators resonating with external signals.
1745 */
1746 if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
1747 u_int32_t freq_inc, w;
1748
1749 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
1750 READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
1751 safestats.st_rngalarm++;
1752 safe_rng_enable_short_cycle(sc);
1753 freq_inc = 18;
1754 for (i = 0; i < 64; i++) {
1755 w = READ_REG(sc, SAFE_RNG_CNFG);
1756 freq_inc = ((w + freq_inc) & 0x3fL);
1757 w = ((w & ~0x3fL) | freq_inc);
1758 WRITE_REG(sc, SAFE_RNG_CNFG, w);
1759
1760 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1761
1762 (void) safe_rng_read(sc);
1763 DELAY(25);
1764
1765 if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
1766 safe_rng_disable_short_cycle(sc);
1767 goto retry;
1768 }
1769 freq_inc = 1;
1770 }
1771 safe_rng_disable_short_cycle(sc);
1772 } else
1773 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1774
1775 (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
1776 callout_reset(&sc->sc_rngto,
1777 hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
1778 }
1779 #endif /* SAFE_NO_RNG */
1780
1781 static void
1782 safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1783 {
1784 bus_addr_t *paddr = (bus_addr_t*) arg;
1785 *paddr = segs->ds_addr;
1786 }
1787
1788 static int
1789 safe_dma_malloc(
1790 struct safe_softc *sc,
1791 bus_size_t size,
1792 struct safe_dma_alloc *dma,
1793 int mapflags
1794 )
1795 {
1796 int r;
1797
1798 r = bus_dma_tag_create(NULL, /* parent */
1799 sizeof(u_int32_t), 0, /* alignment, bounds */
1800 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1801 BUS_SPACE_MAXADDR, /* highaddr */
1802 NULL, NULL, /* filter, filterarg */
1803 size, /* maxsize */
1804 1, /* nsegments */
1805 size, /* maxsegsize */
1806 BUS_DMA_ALLOCNOW, /* flags */
1807 NULL, NULL, /* locking */
1808 &dma->dma_tag);
1809 if (r != 0) {
1810 device_printf(sc->sc_dev, "safe_dma_malloc: "
1811 "bus_dma_tag_create failed; error %u\n", r);
1812 goto fail_0;
1813 }
1814
1815 r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
1816 if (r != 0) {
1817 device_printf(sc->sc_dev, "safe_dma_malloc: "
1818 "bus_dmamap_create failed; error %u\n", r);
1819 goto fail_1;
1820 }
1821
1822 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1823 BUS_DMA_NOWAIT, &dma->dma_map);
1824 if (r != 0) {
1825 device_printf(sc->sc_dev, "safe_dma_malloc: "
1826 "bus_dmammem_alloc failed; size %zu, error %u\n",
1827 size, r);
1828 goto fail_2;
1829 }
1830
1831 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1832 size,
1833 safe_dmamap_cb,
1834 &dma->dma_paddr,
1835 mapflags | BUS_DMA_NOWAIT);
1836 if (r != 0) {
1837 device_printf(sc->sc_dev, "safe_dma_malloc: "
1838 "bus_dmamap_load failed; error %u\n", r);
1839 goto fail_3;
1840 }
1841
1842 dma->dma_size = size;
1843 return (0);
1844
1845 fail_3:
1846 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1847 fail_2:
1848 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1849 fail_1:
1850 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1851 bus_dma_tag_destroy(dma->dma_tag);
1852 fail_0:
1853 dma->dma_map = NULL;
1854 dma->dma_tag = NULL;
1855 return (r);
1856 }
1857
1858 static void
1859 safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
1860 {
1861 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1862 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1863 bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
1864 bus_dma_tag_destroy(dma->dma_tag);
1865 }
1866
1867 /*
1868  * Resets the board.  Values in the registers are left as is
1869 * from the reset (i.e. initial values are assigned elsewhere).
1870 */
1871 static void
1872 safe_reset_board(struct safe_softc *sc)
1873 {
1874 u_int32_t v;
1875 /*
1876 * Reset the device. The manual says no delay
1877 * is needed between marking and clearing reset.
1878 */
1879 v = READ_REG(sc, SAFE_PE_DMACFG) &~
1880 (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
1881 SAFE_PE_DMACFG_SGRESET);
1882 WRITE_REG(sc, SAFE_PE_DMACFG, v
1883 | SAFE_PE_DMACFG_PERESET
1884 | SAFE_PE_DMACFG_PDRRESET
1885 | SAFE_PE_DMACFG_SGRESET);
1886 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1887 }
1888
1889 /*
1890 * Initialize registers we need to touch only once.
1891 */
1892 static void
1893 safe_init_board(struct safe_softc *sc)
1894 {
1895 u_int32_t v, dwords;
1896
1897 	v = READ_REG(sc, SAFE_PE_DMACFG);
1898 v &=~ SAFE_PE_DMACFG_PEMODE;
1899 v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
1900 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
1901 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
1902 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
1903 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
1904 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
1905 ;
1906 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1907 #if 0
1908 /* XXX select byte swap based on host byte order */
1909 WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
1910 #endif
1911 if (sc->sc_chiprev == SAFE_REV(1,0)) {
1912 /*
1913 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
1914 * "target mode transfers" done while the chip is DMA'ing
1915 * >1020 bytes cause the hardware to lockup. To avoid this
1916 * we reduce the max PCI transfer size and use small source
1917 * particle descriptors (<= 256 bytes).
1918 */
1919 WRITE_REG(sc, SAFE_DMA_CFG, 256);
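/* e.g. if the register reads back as written, the message below reports (256>>2) & 0xff = 64 words */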
1920 device_printf(sc->sc_dev,
1921 "Reduce max DMA size to %u words for rev %u.%u WAR\n",
1922 (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
1923 SAFE_REV_MAJ(sc->sc_chiprev),
1924 SAFE_REV_MIN(sc->sc_chiprev));
1925 }
1926
1927 /* NB: operands+results are overlaid */
1928 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
1929 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
1930 /*
1931 * Configure ring entry size and number of items in the ring.
1932 */
1933 KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
1934 ("PE ring entry not 32-bit aligned!"));
1935 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
1936 WRITE_REG(sc, SAFE_PE_RINGCFG,
1937 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
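/*
 * For example, were sizeof(struct safe_ringentry) 64 bytes (the real
 * value is fixed in safevar.h), dwords would be 16 and the OFFSET
 * field would be programmed with 16.
 */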
1938 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
1939
1940 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
1941 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
1942 WRITE_REG(sc, SAFE_PE_PARTSIZE,
1943 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
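/*
 * e.g. if both particle counts were 256, this would program
 * (256<<16) | 256 = 0x01000100; the real counts come from safevar.h.
 */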
1944 /*
1945 * NB: destination particles are fixed size. We use
1946 * an mbuf cluster and require all results go to
1947 * clusters or smaller.
1948 */
1949 WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);
1950
1951 /* it's now safe to enable PE mode, do it */
1952 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
1953
1954 /*
1955 * Configure hardware to use level-triggered interrupts and
1956 * to interrupt after each descriptor is processed.
1957 */
1958 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
1959 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
1960 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
1961 }
1962
1963 /*
1964 * Init PCI registers (currently a no-op for this device).
1965 */
1966 static void
1967 safe_init_pciregs(device_t dev)
1968 {
1969 }
1970
1971 /*
1972 * Clean up after a chip crash.
1973 * It is assumed that the caller is in splimp()
1974 */
1975 static void
1976 safe_cleanchip(struct safe_softc *sc)
1977 {
1978
1979 if (sc->sc_nqchip != 0) {
1980 struct safe_ringentry *re = sc->sc_back;
1981
1982 while (re != sc->sc_front) {
1983 if (re->re_desc.d_csr != 0)
1984 safe_free_entry(sc, re);
1985 if (++re == sc->sc_ringtop)
1986 re = sc->sc_ring;
1987 }
1988 sc->sc_back = re;
1989 sc->sc_nqchip = 0;
1990 }
1991 }
1992
1993 /*
1994 * free a safe_q
1995 * It is assumed that the caller is within splimp().
1996 */
1997 static int
1998 safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
1999 {
2000 struct cryptop *crp;
2001
2002 /*
2003 * Free the destination mbuf if it is distinct from the source
2004 */
2005 if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
2006 m_freem(re->re_dst_m);
2007
2008 crp = (struct cryptop *)re->re_crp;
2009
2010 re->re_desc.d_csr = 0;
2011
2012 crp->crp_etype = EFAULT;
2013 crypto_done(crp);
2014 return(0);
2015 }
2016
2017 /*
2018 * Routine to reset the chip and clean up.
2019 * It is assumed that the caller is in splimp()
2020 */
2021 static void
2022 safe_totalreset(struct safe_softc *sc)
2023 {
2024 safe_reset_board(sc);
2025 safe_init_board(sc);
2026 safe_cleanchip(sc);
2027 }
2028
2029 /*
2030 * Is the operand suitably aligned for direct DMA. Each
2031 * segment must be aligned on a 32-bit boundary and all
2032 * but the last segment must be a multiple of 4 bytes.
2033 */
2034 static int
2035 safe_dmamap_aligned(const struct safe_operand *op)
2036 {
2037 int i;
2038
2039 for (i = 0; i < op->nsegs; i++) {
2040 if (op->segs[i].ds_addr & 3)
2041 return (0);
2042 if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
2043 return (0);
2044 }
2045 return (1);
2046 }
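/*
 * For example, segments (addr 0x1000, len 64) + (addr 0x1040, len 17)
 * pass: only the last segment may have a length that is not a
 * multiple of 4.  A segment starting at 0x1002 fails the address
 * check.
 */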
2047
2048 /*
2049 * Is the operand suitable for direct DMA as the destination
2050 * of an operation. The hardware requires that each ``particle''
2051 * but the last in an operation result have the same size. We
2052 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
2053 * 0 if some segment is not a multiple of this size, 1 if all
2054 * segments are exactly this size, or 2 if segments are at worst
2055 * a multiple of this size.
2056 */
2057 static int
2058 safe_dmamap_uniform(const struct safe_operand *op)
2059 {
2060 int result = 1;
2061
2062 if (op->nsegs > 0) {
2063 int i;
2064
2065 for (i = 0; i < op->nsegs-1; i++) {
2066 if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
2067 return (0);
2068 if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
2069 result = 2;
2070 }
2071 }
2072 return (result);
2073 }
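/*
 * Example, assuming SAFE_MAX_DSIZE is one mbuf cluster (as the ring
 * setup above suggests): segment lengths {MCLBYTES, 100} yield 1,
 * {2*MCLBYTES, 100} yield 2 (multiples that can be carved into
 * fixed-size particles), and {100, 100} yield 0.
 */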
2074
2075 #ifdef SAFE_DEBUG
2076 static void
2077 safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
2078 {
2079 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
2080 , tag
2081 , READ_REG(sc, SAFE_DMA_ENDIAN)
2082 , READ_REG(sc, SAFE_DMA_SRCADDR)
2083 , READ_REG(sc, SAFE_DMA_DSTADDR)
2084 , READ_REG(sc, SAFE_DMA_STAT)
2085 );
2086 }
2087
2088 static void
2089 safe_dump_intrstate(struct safe_softc *sc, const char *tag)
2090 {
2091 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
2092 , tag
2093 , READ_REG(sc, SAFE_HI_CFG)
2094 , READ_REG(sc, SAFE_HI_MASK)
2095 , READ_REG(sc, SAFE_HI_DESC_CNT)
2096 , READ_REG(sc, SAFE_HU_STAT)
2097 , READ_REG(sc, SAFE_HM_STAT)
2098 );
2099 }
2100
2101 static void
2102 safe_dump_ringstate(struct safe_softc *sc, const char *tag)
2103 {
2104 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
2105
2106 /* NB: assume caller has lock on ring */
2107 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
2108 tag,
2109 estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
2110 (unsigned long)(sc->sc_back - sc->sc_ring),
2111 (unsigned long)(sc->sc_front - sc->sc_ring));
2112 }
2113
2114 static void
2115 safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
2116 {
2117 int ix, nsegs;
2118
2119 ix = re - sc->sc_ring;
2120 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
2121 , tag
2122 , re, ix
2123 , re->re_desc.d_csr
2124 , re->re_desc.d_src
2125 , re->re_desc.d_dst
2126 , re->re_desc.d_sa
2127 , re->re_desc.d_len
2128 );
2129 if (re->re_src.nsegs > 1) {
2130 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
2131 sizeof(struct safe_pdesc);
2132 for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
2133 printf(" spd[%u] %p: %p size %u flags %x"
2134 , ix, &sc->sc_spring[ix]
2135 , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
2136 , sc->sc_spring[ix].pd_size
2137 , sc->sc_spring[ix].pd_flags
2138 );
2139 if (sc->sc_spring[ix].pd_size == 0)
2140 printf(" (zero!)");
2141 printf("\n");
2142 if (++ix == SAFE_TOTAL_SPART)
2143 ix = 0;
2144 }
2145 }
2146 if (re->re_dst.nsegs > 1) {
2147 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
2148 sizeof(struct safe_pdesc);
2149 for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
2150 printf(" dpd[%u] %p: %p flags %x\n"
2151 , ix, &sc->sc_dpring[ix]
2152 , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
2153 , sc->sc_dpring[ix].pd_flags
2154 );
2155 if (++ix == SAFE_TOTAL_DPART)
2156 ix = 0;
2157 }
2158 }
2159 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
2160 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
2161 printf("sa: key %x %x %x %x %x %x %x %x\n"
2162 , re->re_sa.sa_key[0]
2163 , re->re_sa.sa_key[1]
2164 , re->re_sa.sa_key[2]
2165 , re->re_sa.sa_key[3]
2166 , re->re_sa.sa_key[4]
2167 , re->re_sa.sa_key[5]
2168 , re->re_sa.sa_key[6]
2169 , re->re_sa.sa_key[7]
2170 );
2171 printf("sa: indigest %x %x %x %x %x\n"
2172 , re->re_sa.sa_indigest[0]
2173 , re->re_sa.sa_indigest[1]
2174 , re->re_sa.sa_indigest[2]
2175 , re->re_sa.sa_indigest[3]
2176 , re->re_sa.sa_indigest[4]
2177 );
2178 printf("sa: outdigest %x %x %x %x %x\n"
2179 , re->re_sa.sa_outdigest[0]
2180 , re->re_sa.sa_outdigest[1]
2181 , re->re_sa.sa_outdigest[2]
2182 , re->re_sa.sa_outdigest[3]
2183 , re->re_sa.sa_outdigest[4]
2184 );
2185 printf("sr: iv %x %x %x %x\n"
2186 , re->re_sastate.sa_saved_iv[0]
2187 , re->re_sastate.sa_saved_iv[1]
2188 , re->re_sastate.sa_saved_iv[2]
2189 , re->re_sastate.sa_saved_iv[3]
2190 );
2191 printf("sr: hashbc %u indigest %x %x %x %x %x\n"
2192 , re->re_sastate.sa_saved_hashbc
2193 , re->re_sastate.sa_saved_indigest[0]
2194 , re->re_sastate.sa_saved_indigest[1]
2195 , re->re_sastate.sa_saved_indigest[2]
2196 , re->re_sastate.sa_saved_indigest[3]
2197 , re->re_sastate.sa_saved_indigest[4]
2198 );
2199 }
2200
2201 static void
2202 safe_dump_ring(struct safe_softc *sc, const char *tag)
2203 {
2204 mtx_lock(&sc->sc_ringmtx);
2205 printf("\nSafeNet Ring State:\n");
2206 safe_dump_intrstate(sc, tag);
2207 safe_dump_dmastatus(sc, tag);
2208 safe_dump_ringstate(sc, tag);
2209 if (sc->sc_nqchip) {
2210 struct safe_ringentry *re = sc->sc_back;
2211 do {
2212 safe_dump_request(sc, tag, re);
2213 if (++re == sc->sc_ringtop)
2214 re = sc->sc_ring;
2215 } while (re != sc->sc_front);
2216 }
2217 mtx_unlock(&sc->sc_ringmtx);
2218 }
2219
2220 static int
2221 sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
2222 {
2223 char dmode[64];
2224 int error;
2225
2226 strncpy(dmode, "", sizeof(dmode) - 1);
2227 dmode[sizeof(dmode) - 1] = '\0';
2228 error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);
2229
2230 if (error == 0 && req->newptr != NULL) {
2231 struct safe_softc *sc = safec;
2232
2233 if (!sc)
2234 return EINVAL;
2235 if (strncmp(dmode, "dma", 3) == 0)
2236 safe_dump_dmastatus(sc, "safe0");
2237 else if (strncmp(dmode, "int", 3) == 0)
2238 safe_dump_intrstate(sc, "safe0");
2239 else if (strncmp(dmode, "ring", 4) == 0)
2240 safe_dump_ring(sc, "safe0");
2241 else
2242 return EINVAL;
2243 }
2244 return error;
2245 }
2246 SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW,
2247 0, 0, sysctl_hw_safe_dump, "A", "Dump driver state");
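/*
 * Example usage from userland:
 *
 *	sysctl hw.safe.dump=dma		# DMA engine registers
 *	sysctl hw.safe.dump=int		# interrupt state
 *	sysctl hw.safe.dump=ring	# ring + queued requests
 */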
2248 #endif /* SAFE_DEBUG */