FreeBSD/Linux Kernel Cross Reference
sys/dev/safe/safe.c
1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2003 Sam Leffler, Errno Consulting
5 * Copyright (c) 2003 Global Technology Associates, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 /*
34 * SafeNet SafeXcel-1141 hardware crypto accelerator
35 */
36 #include "opt_safe.h"
37
38 #include <sys/param.h>
39 #include <sys/systm.h>
40 #include <sys/proc.h>
41 #include <sys/errno.h>
42 #include <sys/malloc.h>
43 #include <sys/kernel.h>
44 #include <sys/mbuf.h>
45 #include <sys/module.h>
46 #include <sys/lock.h>
47 #include <sys/mutex.h>
48 #include <sys/sysctl.h>
49 #include <sys/endian.h>
50 #include <sys/uio.h>
51
52 #include <vm/vm.h>
53 #include <vm/pmap.h>
54
55 #include <machine/bus.h>
56 #include <machine/resource.h>
57 #include <sys/bus.h>
58 #include <sys/rman.h>
59
60 #include <opencrypto/cryptodev.h>
61 #include <opencrypto/xform_auth.h>
62 #include <sys/random.h>
63 #include <sys/kobj.h>
64
65 #include "cryptodev_if.h"
66
67 #include <dev/pci/pcivar.h>
68 #include <dev/pci/pcireg.h>
69
70 #ifdef SAFE_RNDTEST
71 #include <dev/rndtest/rndtest.h>
72 #endif
73 #include <dev/safe/safereg.h>
74 #include <dev/safe/safevar.h>
75
76 #ifndef bswap32
77 #define bswap32 NTOHL
78 #endif
79
80 /*
81 * Prototypes for the PCI device interface
82 */
83 static int safe_probe(device_t);
84 static int safe_attach(device_t);
85 static int safe_detach(device_t);
86 static int safe_suspend(device_t);
87 static int safe_resume(device_t);
88 static int safe_shutdown(device_t);
89
90 static int safe_probesession(device_t, const struct crypto_session_params *);
91 static int safe_newsession(device_t, crypto_session_t,
92 const struct crypto_session_params *);
93 static int safe_process(device_t, struct cryptop *, int);
94
95 static device_method_t safe_methods[] = {
96 /* Device interface */
97 DEVMETHOD(device_probe, safe_probe),
98 DEVMETHOD(device_attach, safe_attach),
99 DEVMETHOD(device_detach, safe_detach),
100 DEVMETHOD(device_suspend, safe_suspend),
101 DEVMETHOD(device_resume, safe_resume),
102 DEVMETHOD(device_shutdown, safe_shutdown),
103
104 /* crypto device methods */
105 DEVMETHOD(cryptodev_probesession, safe_probesession),
106 DEVMETHOD(cryptodev_newsession, safe_newsession),
107 DEVMETHOD(cryptodev_process, safe_process),
108
109 DEVMETHOD_END
110 };
111
112 static driver_t safe_driver = {
113 "safe",
114 safe_methods,
115 sizeof (struct safe_softc)
116 };
117
118 DRIVER_MODULE(safe, pci, safe_driver, 0, 0);
119 MODULE_DEPEND(safe, crypto, 1, 1, 1);
120 #ifdef SAFE_RNDTEST
121 MODULE_DEPEND(safe, rndtest, 1, 1, 1);
122 #endif
123
124 static void safe_intr(void *);
125 static void safe_callback(struct safe_softc *, struct safe_ringentry *);
126 static void safe_feed(struct safe_softc *, struct safe_ringentry *);
127 static void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
128 #ifndef SAFE_NO_RNG
129 static void safe_rng_init(struct safe_softc *);
130 static void safe_rng(void *);
131 #endif /* SAFE_NO_RNG */
132 static int safe_dma_malloc(struct safe_softc *, bus_size_t,
133 struct safe_dma_alloc *, int);
134 #define safe_dma_sync(_dma, _flags) \
135 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
136 static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
137 static int safe_dmamap_aligned(const struct safe_operand *);
138 static int safe_dmamap_uniform(const struct safe_operand *);
139
140 static void safe_reset_board(struct safe_softc *);
141 static void safe_init_board(struct safe_softc *);
142 static void safe_init_pciregs(device_t dev);
143 static void safe_cleanchip(struct safe_softc *);
144 static void safe_totalreset(struct safe_softc *);
145
146 static int safe_free_entry(struct safe_softc *, struct safe_ringentry *);
147
148 static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
149 "SafeNet driver parameters");
150
151 #ifdef SAFE_DEBUG
152 static void safe_dump_dmastatus(struct safe_softc *, const char *);
153 static void safe_dump_ringstate(struct safe_softc *, const char *);
154 static void safe_dump_intrstate(struct safe_softc *, const char *);
155 static void safe_dump_request(struct safe_softc *, const char *,
156 struct safe_ringentry *);
157
158 static struct safe_softc *safec; /* for use by hw.safe.dump */
159
160 static int safe_debug = 0;
161 SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
162 0, "control debugging msgs");
163 #define DPRINTF(_x) if (safe_debug) printf _x
164 #else
165 #define DPRINTF(_x)
166 #endif
167
168 #define READ_REG(sc,r) \
169 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
170
171 #define WRITE_REG(sc,reg,val) \
172 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)
173
174 struct safe_stats safestats;
175 SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
176 safe_stats, "driver statistics");
177 #ifndef SAFE_NO_RNG
178 static int safe_rnginterval = 1; /* poll once a second */
179 SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
180 0, "RNG polling interval (secs)");
181 static int safe_rngbufsize = 16; /* 64 bytes each poll */
182 SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
183 0, "RNG polling buffer size (32-bit words)");
184 static int safe_rngmaxalarm = 8; /* max alarms before reset */
185 SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
186 0, "RNG max alarms before reset");
187 #endif /* SAFE_NO_RNG */
188
189 static int
190 safe_probe(device_t dev)
191 {
192 if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
193 pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
194 return (BUS_PROBE_DEFAULT);
195 return (ENXIO);
196 }
197
198 static const char*
199 safe_partname(struct safe_softc *sc)
200 {
201 /* XXX sprintf numbers when not decoded */
202 switch (pci_get_vendor(sc->sc_dev)) {
203 case PCI_VENDOR_SAFENET:
204 switch (pci_get_device(sc->sc_dev)) {
205 case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
206 }
207 return "SafeNet unknown-part";
208 }
209 return "Unknown-vendor unknown-part";
210 }
211
212 #ifndef SAFE_NO_RNG
213 static void
214 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
215 {
216 /* MarkM: FIX!! Check that this does not swamp the harvester! */
217 random_harvest_queue(buf, count, RANDOM_PURE_SAFE);
218 }
219 #endif /* SAFE_NO_RNG */
220
221 static int
222 safe_attach(device_t dev)
223 {
224 struct safe_softc *sc = device_get_softc(dev);
225 u_int32_t raddr;
226 u_int32_t i;
227 int rid;
228
229 bzero(sc, sizeof (*sc));
230 sc->sc_dev = dev;
231
232 /* XXX handle power management */
233
234 pci_enable_busmaster(dev);
235
236 /*
237 * Setup memory-mapping of PCI registers.
238 */
239 rid = BS_BAR;
240 sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
241 RF_ACTIVE);
242 if (sc->sc_sr == NULL) {
243 device_printf(dev, "cannot map register space\n");
244 goto bad;
245 }
246 sc->sc_st = rman_get_bustag(sc->sc_sr);
247 sc->sc_sh = rman_get_bushandle(sc->sc_sr);
248
249 /*
250 * Arrange interrupt line.
251 */
252 rid = 0;
253 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
254 RF_SHAREABLE|RF_ACTIVE);
255 if (sc->sc_irq == NULL) {
256 device_printf(dev, "could not map interrupt\n");
257 goto bad1;
258 }
259 /*
260 * NB: Network code assumes we are blocked with splimp()
261 * so make sure the IRQ is mapped appropriately.
262 */
263 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
264 NULL, safe_intr, sc, &sc->sc_ih)) {
265 device_printf(dev, "could not establish interrupt\n");
266 goto bad2;
267 }
268
269 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safe_session),
270 CRYPTOCAP_F_HARDWARE);
271 if (sc->sc_cid < 0) {
272 device_printf(dev, "could not get crypto driver id\n");
273 goto bad3;
274 }
275
276 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
277 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
278
279 /*
280 * Setup DMA descriptor area.
281 */
282 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
283 1, /* alignment */
284 SAFE_DMA_BOUNDARY, /* boundary */
285 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
286 BUS_SPACE_MAXADDR, /* highaddr */
287 NULL, NULL, /* filter, filterarg */
288 SAFE_MAX_DMA, /* maxsize */
289 SAFE_MAX_PART, /* nsegments */
290 SAFE_MAX_SSIZE, /* maxsegsize */
291 BUS_DMA_ALLOCNOW, /* flags */
292 NULL, NULL, /* locking */
293 &sc->sc_srcdmat)) {
294 device_printf(dev, "cannot allocate DMA tag\n");
295 goto bad4;
296 }
297 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
298 1, /* alignment */
299 SAFE_MAX_DSIZE, /* boundary */
300 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
301 BUS_SPACE_MAXADDR, /* highaddr */
302 NULL, NULL, /* filter, filterarg */
303 SAFE_MAX_DMA, /* maxsize */
304 SAFE_MAX_PART, /* nsegments */
305 SAFE_MAX_DSIZE, /* maxsegsize */
306 BUS_DMA_ALLOCNOW, /* flags */
307 NULL, NULL, /* locking */
308 &sc->sc_dstdmat)) {
309 device_printf(dev, "cannot allocate DMA tag\n");
310 goto bad4;
311 }
312
313 /*
314 * Allocate packet engine descriptors.
315 */
316 if (safe_dma_malloc(sc,
317 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
318 &sc->sc_ringalloc, 0)) {
319 device_printf(dev, "cannot allocate PE descriptor ring\n");
320 bus_dma_tag_destroy(sc->sc_srcdmat);
321 goto bad4;
322 }
323 /*
324 * Hookup the static portion of all our data structures.
325 */
326 sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
327 sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
328 sc->sc_front = sc->sc_ring;
329 sc->sc_back = sc->sc_ring;
330 raddr = sc->sc_ringalloc.dma_paddr;
331 bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
332 for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
333 struct safe_ringentry *re = &sc->sc_ring[i];
334
335 re->re_desc.d_sa = raddr +
336 offsetof(struct safe_ringentry, re_sa);
337 re->re_sa.sa_staterec = raddr +
338 offsetof(struct safe_ringentry, re_sastate);
339
340 raddr += sizeof (struct safe_ringentry);
341 }
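	/*
	 * Example of the layout just built (hypothetical numbers): with
	 * dma_paddr = 0x01000000 and a 0x80-byte ring entry, entry 0's
	 * d_sa is 0x01000000 + offsetof(struct safe_ringentry, re_sa)
	 * and entry 1's is 0x01000080 + the same offset, so the chip can
	 * locate each entry's SA and state record with no further driver
	 * fixup.
	 */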
342 mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
343 "packet engine ring", MTX_DEF);
344
345 /*
346 * Allocate scatter and gather particle descriptors.
347 */
348 if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
349 &sc->sc_spalloc, 0)) {
350 device_printf(dev, "cannot allocate source particle "
351 "descriptor ring\n");
352 mtx_destroy(&sc->sc_ringmtx);
353 safe_dma_free(sc, &sc->sc_ringalloc);
354 bus_dma_tag_destroy(sc->sc_srcdmat);
355 goto bad4;
356 }
357 sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
358 sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
359 sc->sc_spfree = sc->sc_spring;
360 bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
361
362 if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
363 &sc->sc_dpalloc, 0)) {
364 device_printf(dev, "cannot allocate destination particle "
365 "descriptor ring\n");
366 mtx_destroy(&sc->sc_ringmtx);
367 safe_dma_free(sc, &sc->sc_spalloc);
368 safe_dma_free(sc, &sc->sc_ringalloc);
369 bus_dma_tag_destroy(sc->sc_dstdmat);
370 goto bad4;
371 }
372 sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
373 sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
374 sc->sc_dpfree = sc->sc_dpring;
375 bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
376
377 device_printf(sc->sc_dev, "%s", safe_partname(sc));
378
379 sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
380 if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
381 sc->sc_flags |= SAFE_FLAGS_RNG;
382 printf(" rng");
383 }
384 if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
385 #if 0
386 printf(" key");
387 sc->sc_flags |= SAFE_FLAGS_KEY;
388 #endif
389 }
390 if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
391 printf(" des/3des");
392 }
393 if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
394 printf(" aes");
395 }
396 if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
397 printf(" md5");
398 }
399 if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
400 printf(" sha1");
401 }
402 /* XXX other supported algorithms */
403 printf("\n");
404
405 safe_reset_board(sc); /* reset h/w */
406 safe_init_pciregs(dev); /* init pci settings */
407 safe_init_board(sc); /* init h/w */
408
409 #ifndef SAFE_NO_RNG
410 if (sc->sc_flags & SAFE_FLAGS_RNG) {
411 #ifdef SAFE_RNDTEST
412 sc->sc_rndtest = rndtest_attach(dev);
413 if (sc->sc_rndtest)
414 sc->sc_harvest = rndtest_harvest;
415 else
416 sc->sc_harvest = default_harvest;
417 #else
418 sc->sc_harvest = default_harvest;
419 #endif
420 safe_rng_init(sc);
421
422 callout_init(&sc->sc_rngto, 1);
423 callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
424 }
425 #endif /* SAFE_NO_RNG */
426 #ifdef SAFE_DEBUG
427 safec = sc; /* for use by hw.safe.dump */
428 #endif
429 return (0);
430 bad4:
431 crypto_unregister_all(sc->sc_cid);
432 bad3:
433 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
434 bad2:
435 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
436 bad1:
437 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
438 bad:
439 return (ENXIO);
440 }
441
442 /*
443 * Detach a device that successfully probed.
444 */
445 static int
446 safe_detach(device_t dev)
447 {
448 struct safe_softc *sc = device_get_softc(dev);
449
450 /* XXX wait/abort active ops */
451
452 WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
453
454 callout_stop(&sc->sc_rngto);
455
456 crypto_unregister_all(sc->sc_cid);
457
458 #ifdef SAFE_RNDTEST
459 if (sc->sc_rndtest)
460 rndtest_detach(sc->sc_rndtest);
461 #endif
462
463 safe_cleanchip(sc);
464 safe_dma_free(sc, &sc->sc_dpalloc);
465 safe_dma_free(sc, &sc->sc_spalloc);
466 mtx_destroy(&sc->sc_ringmtx);
467 safe_dma_free(sc, &sc->sc_ringalloc);
468
469 bus_generic_detach(dev);
470 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
471 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
472
473 bus_dma_tag_destroy(sc->sc_srcdmat);
474 bus_dma_tag_destroy(sc->sc_dstdmat);
475 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
476
477 return (0);
478 }
479
480 /*
481 * Stop all chip i/o so that the kernel's probe routines don't
482 * get confused by errant DMAs when rebooting.
483 */
484 static int
485 safe_shutdown(device_t dev)
486 {
487 #ifdef notyet
488 safe_stop(device_get_softc(dev));
489 #endif
490 return (0);
491 }
492
493 /*
494 * Device suspend routine.
495 */
496 static int
497 safe_suspend(device_t dev)
498 {
499 struct safe_softc *sc = device_get_softc(dev);
500
501 #ifdef notyet
502 /* XXX stop the device and save PCI settings */
503 #endif
504 sc->sc_suspended = 1;
505
506 return (0);
507 }
508
509 static int
510 safe_resume(device_t dev)
511 {
512 struct safe_softc *sc = device_get_softc(dev);
513
514 #ifdef notyet
515 /* XXX restore PCI settings and start the device */
516 #endif
517 sc->sc_suspended = 0;
518 return (0);
519 }
520
521 /*
522 * SafeXcel Interrupt routine
523 */
524 static void
525 safe_intr(void *arg)
526 {
527 struct safe_softc *sc = arg;
528 volatile u_int32_t stat;
529
530 stat = READ_REG(sc, SAFE_HM_STAT);
531 if (stat == 0) /* shared irq, not for us */
532 return;
533
534 WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
535
536 if ((stat & SAFE_INT_PE_DDONE)) {
537 /*
538 * Descriptor(s) done; scan the ring and
539 * process completed operations.
540 */
541 mtx_lock(&sc->sc_ringmtx);
542 while (sc->sc_back != sc->sc_front) {
543 struct safe_ringentry *re = sc->sc_back;
544 #ifdef SAFE_DEBUG
545 if (safe_debug) {
546 safe_dump_ringstate(sc, __func__);
547 safe_dump_request(sc, __func__, re);
548 }
549 #endif
550 /*
551 * safe_process marks ring entries that were allocated
552 * but not used with a csr of zero. This ensures the
553 * ring front pointer never needs to be set backwards
554 * in the event that an entry is allocated but not used
555 * because of a setup error.
556 */
557 if (re->re_desc.d_csr != 0) {
558 if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
559 break;
560 if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
561 break;
562 sc->sc_nqchip--;
563 safe_callback(sc, re);
564 }
565 if (++(sc->sc_back) == sc->sc_ringtop)
566 sc->sc_back = sc->sc_ring;
567 }
568 mtx_unlock(&sc->sc_ringmtx);
569 }
570
571 /*
572 * Check to see if we got any DMA Error
573 */
574 if (stat & SAFE_INT_PE_ERROR) {
575 DPRINTF(("dmaerr dmastat %08x\n",
576 READ_REG(sc, SAFE_PE_DMASTAT)));
577 safestats.st_dmaerr++;
578 safe_totalreset(sc);
579 #if 0
580 safe_feed(sc);
581 #endif
582 }
583
584 if (sc->sc_needwakeup) { /* XXX check high watermark */
585 int wakeup = sc->sc_needwakeup & CRYPTO_SYMQ;
586 DPRINTF(("%s: wakeup crypto %x\n", __func__,
587 sc->sc_needwakeup));
588 sc->sc_needwakeup &= ~wakeup;
589 crypto_unblock(sc->sc_cid, wakeup);
590 }
591 }
592
593 /*
594 * safe_feed() - post a request to chip
595 */
596 static void
597 safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
598 {
599 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
600 if (re->re_dst_map != NULL)
601 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
602 BUS_DMASYNC_PREREAD);
603 /* XXX have no smaller granularity */
604 safe_dma_sync(&sc->sc_ringalloc,
605 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
606 safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
607 safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);
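	/*
	 * NB: the PREWRITE syncs flush the just-built descriptor and
	 * particle entries to memory before the doorbell write below;
	 * the PREREAD on the ring covers the chip writing status back
	 * into the (overlaid) result ring.
	 */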
608
609 #ifdef SAFE_DEBUG
610 if (safe_debug) {
611 safe_dump_ringstate(sc, __func__);
612 safe_dump_request(sc, __func__, re);
613 }
614 #endif
615 sc->sc_nqchip++;
616 if (sc->sc_nqchip > safestats.st_maxqchip)
617 safestats.st_maxqchip = sc->sc_nqchip;
618 /* poke h/w to check descriptor ring, any value can be written */
619 WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
620 }
621
622 #define N(a) (sizeof(a) / sizeof (a[0]))
623 static void
624 safe_setup_enckey(struct safe_session *ses, const void *key)
625 {
626 int i;
627
628 bcopy(key, ses->ses_key, ses->ses_klen);
629
630 /* PE is little-endian, ensure proper byte order */
631 for (i = 0; i < N(ses->ses_key); i++)
632 ses->ses_key[i] = htole32(ses->ses_key[i]);
633 }
634
635 static void
636 safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
637 int klen)
638 {
639 SHA1_CTX sha1ctx;
640 int i;
641
642 hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
643 bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
644
645 hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
646 bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
647
648 explicit_bzero(&sha1ctx, sizeof(sha1ctx));
649
650 /* PE is little-endian, ensure proper byte order */
651 for (i = 0; i < N(ses->ses_hminner); i++) {
652 ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
653 ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
654 }
655 }
656 #undef N
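
/*
 * Illustrative sketch (not compiled in): what the hmac_init_ipad()
 * call above effectively computes for SHA-1.  The key is zero-padded
 * to the block size, XORed with the ipad byte, and one compression
 * round is run; the intermediate state is what lands in ses_hminner.
 * hmac_init_opad() does the same with HMAC_OPAD_VAL for ses_hmouter.
 * Assumes klen <= SHA1_BLOCK_LEN (longer keys are first hashed).
 */
#if 0
static void
example_hmac_ipad_state(const uint8_t *key, int klen, SHA1_CTX *ctx)
{
	uint8_t blk[SHA1_BLOCK_LEN];
	int i;

	memset(blk, 0, sizeof(blk));
	memcpy(blk, key, klen);
	for (i = 0; i < SHA1_BLOCK_LEN; i++)
		blk[i] ^= HMAC_IPAD_VAL;	/* 0x36 */
	SHA1Init(ctx);
	SHA1Update(ctx, blk, sizeof(blk));	/* state after one block */
}
#endif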
657
658 static bool
659 safe_auth_supported(struct safe_softc *sc,
660 const struct crypto_session_params *csp)
661 {
662
663 switch (csp->csp_auth_alg) {
664 case CRYPTO_SHA1_HMAC:
665 if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
666 return (false);
667 break;
668 default:
669 return (false);
670 }
671 return (true);
672 }
673
674 static bool
675 safe_cipher_supported(struct safe_softc *sc,
676 const struct crypto_session_params *csp)
677 {
678
679 switch (csp->csp_cipher_alg) {
680 case CRYPTO_AES_CBC:
681 if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
682 return (false);
683 if (csp->csp_ivlen != 16)
684 return (false);
685 if (csp->csp_cipher_klen != 16 &&
686 csp->csp_cipher_klen != 24 &&
687 csp->csp_cipher_klen != 32)
688 return (false);
689 break;
690 }
691 return (true);
692 }
693
694 static int
695 safe_probesession(device_t dev, const struct crypto_session_params *csp)
696 {
697 struct safe_softc *sc = device_get_softc(dev);
698
699 if (csp->csp_flags != 0)
700 return (EINVAL);
701 switch (csp->csp_mode) {
702 case CSP_MODE_DIGEST:
703 if (!safe_auth_supported(sc, csp))
704 return (EINVAL);
705 break;
706 case CSP_MODE_CIPHER:
707 if (!safe_cipher_supported(sc, csp))
708 return (EINVAL);
709 break;
710 case CSP_MODE_ETA:
711 if (!safe_auth_supported(sc, csp) ||
712 !safe_cipher_supported(sc, csp))
713 return (EINVAL);
714 break;
715 default:
716 return (EINVAL);
717 }
718
719 return (CRYPTODEV_PROBE_HARDWARE);
720 }
721
722 /*
723 * Allocate a new 'session'.
724 */
725 static int
726 safe_newsession(device_t dev, crypto_session_t cses,
727 const struct crypto_session_params *csp)
728 {
729 struct safe_session *ses;
730
731 ses = crypto_get_driver_session(cses);
732 if (csp->csp_cipher_alg != 0) {
733 ses->ses_klen = csp->csp_cipher_klen;
734 if (csp->csp_cipher_key != NULL)
735 safe_setup_enckey(ses, csp->csp_cipher_key);
736 }
737
738 if (csp->csp_auth_alg != 0) {
739 ses->ses_mlen = csp->csp_auth_mlen;
740 if (ses->ses_mlen == 0) {
741 ses->ses_mlen = SHA1_HASH_LEN;
742 }
743
744 if (csp->csp_auth_key != NULL) {
745 safe_setup_mackey(ses, csp->csp_auth_alg,
746 csp->csp_auth_key, csp->csp_auth_klen);
747 }
748 }
749
750 return (0);
751 }
752
753 static void
754 safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
755 {
756 struct safe_operand *op = arg;
757
758 DPRINTF(("%s: nsegs %d error %d\n", __func__,
759 nsegs, error));
760 if (error != 0)
761 return;
762 op->nsegs = nsegs;
763 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
764 }
765
766 static int
767 safe_process(device_t dev, struct cryptop *crp, int hint)
768 {
769 struct safe_softc *sc = device_get_softc(dev);
770 const struct crypto_session_params *csp;
771 int err = 0, i, nicealign, uniform;
772 int bypass, oplen;
773 int16_t coffset;
774 struct safe_session *ses;
775 struct safe_ringentry *re;
776 struct safe_sarec *sa;
777 struct safe_pdesc *pd;
778 u_int32_t cmd0, cmd1, staterec;
779
780 mtx_lock(&sc->sc_ringmtx);
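	/*
	 * NB: sc_front == sc_back alone is ambiguous -- an empty ring
	 * and a completely full one look the same -- so sc_nqchip is
	 * what distinguishes the two cases here.
	 */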
781 if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
782 safestats.st_ringfull++;
783 sc->sc_needwakeup |= CRYPTO_SYMQ;
784 mtx_unlock(&sc->sc_ringmtx);
785 return (ERESTART);
786 }
787 re = sc->sc_front;
788
789 staterec = re->re_sa.sa_staterec; /* save */
790 /* NB: zero everything but the PE descriptor */
791 bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
792 re->re_sa.sa_staterec = staterec; /* restore */
793
794 re->re_crp = crp;
795
796 sa = &re->re_sa;
797 ses = crypto_get_driver_session(crp->crp_session);
798 csp = crypto_get_params(crp->crp_session);
799
800 cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
801 cmd1 = 0;
802 switch (csp->csp_mode) {
803 case CSP_MODE_DIGEST:
804 cmd0 |= SAFE_SA_CMD0_OP_HASH;
805 break;
806 case CSP_MODE_CIPHER:
807 cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
808 break;
809 case CSP_MODE_ETA:
810 cmd0 |= SAFE_SA_CMD0_OP_BOTH;
811 break;
812 }
813
814 if (csp->csp_cipher_alg != 0) {
815 if (crp->crp_cipher_key != NULL)
816 safe_setup_enckey(ses, crp->crp_cipher_key);
817
818 switch (csp->csp_cipher_alg) {
819 case CRYPTO_AES_CBC:
820 cmd0 |= SAFE_SA_CMD0_AES;
821 cmd1 |= SAFE_SA_CMD1_CBC;
822 if (ses->ses_klen * 8 == 128)
823 cmd1 |= SAFE_SA_CMD1_AES128;
824 else if (ses->ses_klen * 8 == 192)
825 cmd1 |= SAFE_SA_CMD1_AES192;
826 else
827 cmd1 |= SAFE_SA_CMD1_AES256;
828 }
829
830 /*
831 * Setup encrypt/decrypt state. When using basic ops
832 * we can't use an inline IV because hash/crypt offset
833 * must be from the end of the IV to the start of the
834 * crypt data and this leaves out the preceding header
835 * from the hash calculation. Instead we place the IV
836 * in the state record and set the hash/crypt offset to
837 * copy both the header+IV.
838 */
839 crypto_read_iv(crp, re->re_sastate.sa_saved_iv);
840 cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
841
842 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
843 cmd0 |= SAFE_SA_CMD0_OUTBOUND;
844
845 /*
846 * XXX: I suspect we don't need this since we
847 * don't save the returned IV.
848 */
849 cmd0 |= SAFE_SA_CMD0_SAVEIV;
850 } else {
851 cmd0 |= SAFE_SA_CMD0_INBOUND;
852 }
853 /*
854 * For basic encryption use the zero pad algorithm.
855 * This pads results to an 8-byte boundary and
856 * suppresses padding verification for inbound (i.e.
857 * decrypt) operations.
858 *
859 * NB: Not sure if the 8-byte pad boundary is a problem.
860 */
861 cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
862
863 /* XXX assert key bufs have the same size */
864 bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
865 }
866
867 if (csp->csp_auth_alg != 0) {
868 if (crp->crp_auth_key != NULL) {
869 safe_setup_mackey(ses, csp->csp_auth_alg,
870 crp->crp_auth_key, csp->csp_auth_klen);
871 }
872
873 switch (csp->csp_auth_alg) {
874 case CRYPTO_SHA1_HMAC:
875 cmd0 |= SAFE_SA_CMD0_SHA1;
876 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
877 break;
878 }
879
880 /*
881 * Digest data is loaded from the SA and the hash
882 * result is saved to the state block where we
883 * retrieve it for return to the caller.
884 */
885 /* XXX assert digest bufs have the same size */
886 bcopy(ses->ses_hminner, sa->sa_indigest,
887 sizeof(sa->sa_indigest));
888 bcopy(ses->ses_hmouter, sa->sa_outdigest,
889 sizeof(sa->sa_outdigest));
890
891 cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
892 re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
893 }
894
895 if (csp->csp_mode == CSP_MODE_ETA) {
896 /*
897 * The driver only supports ETA requests where there
898 * is no gap between the AAD and payload.
899 */
900 if (crp->crp_aad_length != 0 &&
901 crp->crp_aad_start + crp->crp_aad_length !=
902 crp->crp_payload_start) {
903 safestats.st_lenmismatch++;
904 err = EINVAL;
905 goto errout;
906 }
907 if (crp->crp_aad_length != 0)
908 bypass = crp->crp_aad_start;
909 else
910 bypass = crp->crp_payload_start;
911 coffset = crp->crp_aad_length;
912 oplen = crp->crp_payload_start + crp->crp_payload_length;
913 #ifdef SAFE_DEBUG
914 if (safe_debug) {
915 printf("AAD: skip %d, len %d, digest %d\n",
916 crp->crp_aad_start, crp->crp_aad_length,
917 crp->crp_digest_start);
918 printf("payload: skip %d, len %d, IV %d\n",
919 crp->crp_payload_start, crp->crp_payload_length,
920 crp->crp_iv_start);
921 printf("bypass %d coffset %d oplen %d\n",
922 bypass, coffset, oplen);
923 }
924 #endif
925 if (coffset & 3) { /* offset must be 32-bit aligned */
926 DPRINTF(("%s: coffset %u misaligned\n",
927 __func__, coffset));
928 safestats.st_coffmisaligned++;
929 err = EINVAL;
930 goto errout;
931 }
932 coffset >>= 2;
933 if (coffset > 255) { /* offset must be <256 dwords */
934 DPRINTF(("%s: coffset %u too big\n",
935 __func__, coffset));
936 safestats.st_cofftoobig++;
937 err = EINVAL;
938 goto errout;
939 }
940 /*
941 * Tell the hardware to copy the header to the output.
942 * The header is defined as the data from the end of
943 * the bypass to the start of data to be encrypted.
944 * Typically this is the inline IV. Note that you need
945 * to do this even if src+dst are the same; it appears
946 * that w/o this bit the crypted data is written
947 * immediately after the bypass data.
948 */
949 cmd1 |= SAFE_SA_CMD1_HDRCOPY;
950 /*
951 * Disable IP header mutable bit handling. This is
952 * needed to get correct HMAC calculations.
953 */
954 cmd1 |= SAFE_SA_CMD1_MUTABLE;
955 } else {
956 bypass = crp->crp_payload_start;
957 oplen = bypass + crp->crp_payload_length;
958 coffset = 0;
959 }
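	/*
	 * Worked example (hypothetical ETA request): 8 bytes of AAD at
	 * offset 0 followed immediately by 64 bytes of payload gives
	 * bypass = 0, coffset = 8 bytes (2 dwords after the shift
	 * below), and oplen = 8 + 64 = 72 bytes pushed through the
	 * engine.
	 */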
960 /* XXX verify multiple of 4 when using s/g */
961 if (bypass > 96) { /* bypass offset must be <= 96 bytes */
962 DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
963 safestats.st_bypasstoobig++;
964 err = EINVAL;
965 goto errout;
966 }
967
968 if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
969 safestats.st_nomap++;
970 err = ENOMEM;
971 goto errout;
972 }
973 if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
974 &re->re_src, BUS_DMA_NOWAIT) != 0) {
975 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
976 re->re_src_map = NULL;
977 safestats.st_noload++;
978 err = ENOMEM;
979 goto errout;
980 }
981 re->re_src_mapsize = crypto_buffer_len(&crp->crp_buf);
982 nicealign = safe_dmamap_aligned(&re->re_src);
983 uniform = safe_dmamap_uniform(&re->re_src);
984
985 DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
986 nicealign, uniform, re->re_src.nsegs));
987 if (re->re_src.nsegs > 1) {
988 re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
989 ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
990 for (i = 0; i < re->re_src_nsegs; i++) {
991 /* NB: no need to check if there's space */
992 pd = sc->sc_spfree;
993 if (++(sc->sc_spfree) == sc->sc_springtop)
994 sc->sc_spfree = sc->sc_spring;
995
996 KASSERT((pd->pd_flags&3) == 0 ||
997 (pd->pd_flags&3) == SAFE_PD_DONE,
998 ("bogus source particle descriptor; flags %x",
999 pd->pd_flags));
1000 pd->pd_addr = re->re_src_segs[i].ds_addr;
1001 pd->pd_size = re->re_src_segs[i].ds_len;
1002 pd->pd_flags = SAFE_PD_READY;
1003 }
1004 cmd0 |= SAFE_SA_CMD0_IGATHER;
1005 } else {
1006 /*
1007 * No need for gather, reference the operand directly.
1008 */
1009 re->re_desc.d_src = re->re_src_segs[0].ds_addr;
1010 }
1011
1012 if (csp->csp_mode == CSP_MODE_DIGEST) {
1013 /*
1014 * Hash op; no destination needed.
1015 */
1016 } else {
1017 if (nicealign && uniform == 1) {
1018 /*
1019 * Source layout is suitable for direct
1020 * sharing of the DMA map and segment list.
1021 */
1022 re->re_dst = re->re_src;
1023 } else if (nicealign && uniform == 2) {
1024 /*
1025 * The source is properly aligned but requires a
1026 * different particle list to handle DMA of the
1027 * result. Create a new map and do the load to
1028 * create the segment list. The particle
1029 * descriptor setup code below will handle the
1030 * rest.
1031 */
1032 if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
1033 &re->re_dst_map)) {
1034 safestats.st_nomap++;
1035 err = ENOMEM;
1036 goto errout;
1037 }
1038 if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
1039 crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
1040 0) {
1041 bus_dmamap_destroy(sc->sc_dstdmat,
1042 re->re_dst_map);
1043 re->re_dst_map = NULL;
1044 safestats.st_noload++;
1045 err = ENOMEM;
1046 goto errout;
1047 }
1048 } else if (crp->crp_buf.cb_type == CRYPTO_BUF_MBUF) {
1049 int totlen, len;
1050 struct mbuf *m, *top, **mp;
1051
1052 /*
1053 * DMA constraints require that we allocate a
1054 * new mbuf chain for the destination. We
1055 * allocate an entire new set of mbufs of
1056 * optimal/required size and then tell the
1057 * hardware to copy any bits that are not
1058 * created as a byproduct of the operation.
1059 */
1060 if (!nicealign)
1061 safestats.st_unaligned++;
1062 if (!uniform)
1063 safestats.st_notuniform++;
1064 totlen = re->re_src_mapsize;
1065 if (crp->crp_buf.cb_mbuf->m_flags & M_PKTHDR) {
1066 len = MHLEN;
1067 MGETHDR(m, M_NOWAIT, MT_DATA);
1068 if (m && !m_dup_pkthdr(m, crp->crp_buf.cb_mbuf,
1069 M_NOWAIT)) {
1070 m_free(m);
1071 m = NULL;
1072 }
1073 } else {
1074 len = MLEN;
1075 MGET(m, M_NOWAIT, MT_DATA);
1076 }
1077 if (m == NULL) {
1078 safestats.st_nombuf++;
1079 err = sc->sc_nqchip ? ERESTART : ENOMEM;
1080 goto errout;
1081 }
1082 if (totlen >= MINCLSIZE) {
1083 if (!(MCLGET(m, M_NOWAIT))) {
1084 m_free(m);
1085 safestats.st_nomcl++;
1086 err = sc->sc_nqchip ?
1087 ERESTART : ENOMEM;
1088 goto errout;
1089 }
1090 len = MCLBYTES;
1091 }
1092 m->m_len = len;
1093 top = NULL;
1094 mp = ⊤
1095
1096 while (totlen > 0) {
1097 if (top) {
1098 MGET(m, M_NOWAIT, MT_DATA);
1099 if (m == NULL) {
1100 m_freem(top);
1101 safestats.st_nombuf++;
1102 err = sc->sc_nqchip ?
1103 ERESTART : ENOMEM;
1104 goto errout;
1105 }
1106 len = MLEN;
1107 }
1108 if (top && totlen >= MINCLSIZE) {
1109 if (!(MCLGET(m, M_NOWAIT))) {
1110 *mp = m;
1111 m_freem(top);
1112 safestats.st_nomcl++;
1113 err = sc->sc_nqchip ?
1114 ERESTART : ENOMEM;
1115 goto errout;
1116 }
1117 len = MCLBYTES;
1118 }
1119 m->m_len = len = min(totlen, len);
1120 totlen -= len;
1121 *mp = m;
1122 mp = &m->m_next;
1123 }
1124 re->re_dst_m = top;
1125 if (bus_dmamap_create(sc->sc_dstdmat,
1126 BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
1127 safestats.st_nomap++;
1128 err = ENOMEM;
1129 goto errout;
1130 }
1131 if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
1132 re->re_dst_map, top, re->re_dst_segs,
1133 &re->re_dst_nsegs, 0) != 0) {
1134 bus_dmamap_destroy(sc->sc_dstdmat,
1135 re->re_dst_map);
1136 re->re_dst_map = NULL;
1137 safestats.st_noload++;
1138 err = ENOMEM;
1139 goto errout;
1140 }
1141 re->re_dst_mapsize = re->re_src_mapsize;
1142 if (re->re_src.mapsize > oplen) {
1143 /*
1144 * There's data following what the
1145 * hardware will copy for us. If this
1146 * isn't just the ICV (that's going to
1147 * be written on completion), copy it
1148 * to the new mbufs.
1149 */
1150 if (!(csp->csp_mode == CSP_MODE_ETA &&
1151 (re->re_src.mapsize-oplen) == ses->ses_mlen &&
1152 crp->crp_digest_start == oplen))
1153 safe_mcopy(crp->crp_buf.cb_mbuf,
1154 re->re_dst_m, oplen);
1155 else
1156 safestats.st_noicvcopy++;
1157 }
1158 } else {
1159 if (!nicealign) {
1160 safestats.st_iovmisaligned++;
1161 err = EINVAL;
1162 goto errout;
1163 } else {
1164 /*
1165 * There's no way to handle the DMA
1166 * requirements with this uio. We
1167 * could create a separate DMA area for
1168 * the result and then copy it back,
1169 * but for now we just bail and return
1170 * an error. Note that uio requests
1171 * > SAFE_MAX_DSIZE are handled because
1172 * the DMA map and segment list for the
1173 * destination will result in a
1174 * destination particle list that does
1175 * the necessary scatter DMA.
1176 */
1177 safestats.st_iovnotuniform++;
1178 err = EINVAL;
1179 goto errout;
1180 }
1181 }
1182
1183 if (re->re_dst.nsegs > 1) {
1184 re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
1185 ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
1186 for (i = 0; i < re->re_dst_nsegs; i++) {
1187 pd = sc->sc_dpfree;
1188 KASSERT((pd->pd_flags&3) == 0 ||
1189 (pd->pd_flags&3) == SAFE_PD_DONE,
1190 ("bogus dest particle descriptor; flags %x",
1191 pd->pd_flags));
1192 if (++(sc->sc_dpfree) == sc->sc_dpringtop)
1193 sc->sc_dpfree = sc->sc_dpring;
1194 pd->pd_addr = re->re_dst_segs[i].ds_addr;
1195 pd->pd_flags = SAFE_PD_READY;
1196 }
1197 cmd0 |= SAFE_SA_CMD0_OSCATTER;
1198 } else {
1199 /*
1200 * No need for scatter, reference the operand directly.
1201 */
1202 re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
1203 }
1204 }
1205
1206 /*
1207 * All done with setup; fill in the SA command words
1208 * and the packet engine descriptor. The operation
1209 * is now ready for submission to the hardware.
1210 */
1211 sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
1212 sa->sa_cmd1 = cmd1
1213 | (coffset << SAFE_SA_CMD1_OFFSET_S)
1214 | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
1215 | SAFE_SA_CMD1_SRPCI
1216 ;
1217 /*
1218 * NB: the order of writes is important here. In case the
1219 * chip is scanning the ring because of an outstanding request
1220 * it might nab this one too. In that case we need to make
1221 * sure the setup is complete before we write the length
1222 * field of the descriptor as it signals the descriptor is
1223 * ready for processing.
1224 */
1225 re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
1226 if (csp->csp_auth_alg != 0)
1227 re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
1228 re->re_desc.d_len = oplen
1229 | SAFE_PE_LEN_READY
1230 | (bypass << SAFE_PE_LEN_BYPASS_S)
1231 ;
1232
1233 safestats.st_ipackets++;
1234 safestats.st_ibytes += oplen;
1235
1236 if (++(sc->sc_front) == sc->sc_ringtop)
1237 sc->sc_front = sc->sc_ring;
1238
1239 /* XXX honor batching */
1240 safe_feed(sc, re);
1241 mtx_unlock(&sc->sc_ringmtx);
1242 return (0);
1243
1244 errout:
1245 if (re->re_dst_m != NULL)
1246 m_freem(re->re_dst_m);
1247
1248 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1249 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1250 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1251 }
1252 if (re->re_src_map != NULL) {
1253 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1254 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1255 }
1256 mtx_unlock(&sc->sc_ringmtx);
1257 if (err != ERESTART) {
1258 crp->crp_etype = err;
1259 crypto_done(crp);
1260 err = 0;
1261 } else {
1262 sc->sc_needwakeup |= CRYPTO_SYMQ;
1263 }
1264 return (err);
1265 }
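
/*
 * Caller-side sketch (hypothetical, not part of this driver): how a
 * request reaches safe_process() through the opencrypto framework.
 * Session setup and error handling are elided; "cses" is a session
 * created with crypto_newsession() against this driver, and "done"
 * must eventually crypto_freereq() the request.
 */
#if 0
static int
example_submit(crypto_session_t cses, struct mbuf *m, int len,
	int (*done)(struct cryptop *))
{
	struct cryptop *crp;

	crp = crypto_getreq(cses, M_NOWAIT);
	if (crp == NULL)
		return (ENOMEM);
	crp->crp_op = CRYPTO_OP_ENCRYPT;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crypto_use_mbuf(crp, m);
	crp->crp_payload_start = 0;
	crp->crp_payload_length = len;
	crp->crp_callback = done;
	return (crypto_dispatch(crp));	/* lands in safe_process() */
}
#endif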
1266
1267 static void
1268 safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
1269 {
1270 const struct crypto_session_params *csp;
1271 struct cryptop *crp = (struct cryptop *)re->re_crp;
1272 struct safe_session *ses;
1273 uint8_t hash[HASH_MAX_LEN];
1274
1275 ses = crypto_get_driver_session(crp->crp_session);
1276 csp = crypto_get_params(crp->crp_session);
1277
1278 safestats.st_opackets++;
1279 safestats.st_obytes += re->re_dst.mapsize;
1280
1281 safe_dma_sync(&sc->sc_ringalloc,
1282 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1283 if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
1284 device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
1285 re->re_desc.d_csr,
1286 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
1287 safestats.st_peoperr++;
1288 crp->crp_etype = EIO; /* something more meaningful? */
1289 }
1290
1291 /*
1292 * XXX: Should crp_buf.cb_mbuf be updated to re->re_dst_m if
1293 * it is non-NULL?
1294 */
1295
1296 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1297 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
1298 BUS_DMASYNC_POSTREAD);
1299 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1300 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1301 }
1302 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
1303 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1304 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1305
1306 if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
1307 if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
1308 /*
1309 * SHA-1 ICV's are byte-swapped; fix 'em up
1310 * before copying them to their destination.
1311 */
1312 re->re_sastate.sa_saved_indigest[0] =
1313 bswap32(re->re_sastate.sa_saved_indigest[0]);
1314 re->re_sastate.sa_saved_indigest[1] =
1315 bswap32(re->re_sastate.sa_saved_indigest[1]);
1316 re->re_sastate.sa_saved_indigest[2] =
1317 bswap32(re->re_sastate.sa_saved_indigest[2]);
1318 }
1319
1320 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
1321 crypto_copydata(crp, crp->crp_digest_start,
1322 ses->ses_mlen, hash);
1323 if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
1324 hash, ses->ses_mlen) != 0)
1325 crp->crp_etype = EBADMSG;
1326 } else
1327 crypto_copyback(crp, crp->crp_digest_start,
1328 ses->ses_mlen, re->re_sastate.sa_saved_indigest);
1329 }
1330 crypto_done(crp);
1331 }
1332
1333 /*
1334 * Copy all data past offset from srcm to dstm.
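* safe_process() calls this with offset = oplen, so any cleartext
* trailing the crypted region is preserved in the new mbuf chain.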
1335 */
1336 static void
1337 safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
1338 {
1339 u_int j, dlen, slen;
1340 caddr_t dptr, sptr;
1341
1342 /*
1343 * Advance src and dst to offset.
1344 */
1345 j = offset;
1346 while (j >= srcm->m_len) {
1347 j -= srcm->m_len;
1348 srcm = srcm->m_next;
1349 if (srcm == NULL)
1350 return;
1351 }
1352 sptr = mtod(srcm, caddr_t) + j;
1353 slen = srcm->m_len - j;
1354
1355 j = offset;
1356 while (j >= dstm->m_len) {
1357 j -= dstm->m_len;
1358 dstm = dstm->m_next;
1359 if (dstm == NULL)
1360 return;
1361 }
1362 dptr = mtod(dstm, caddr_t) + j;
1363 dlen = dstm->m_len - j;
1364
1365 /*
1366 * Copy everything that remains.
1367 */
1368 for (;;) {
1369 j = min(slen, dlen);
1370 bcopy(sptr, dptr, j);
1371 if (slen == j) {
1372 srcm = srcm->m_next;
1373 if (srcm == NULL)
1374 return;
1375 sptr = srcm->m_data;
1376 slen = srcm->m_len;
1377 } else
1378 sptr += j, slen -= j;
1379 if (dlen == j) {
1380 dstm = dstm->m_next;
1381 if (dstm == NULL)
1382 return;
1383 dptr = dstm->m_data;
1384 dlen = dstm->m_len;
1385 } else
1386 dptr += j, dlen -= j;
1387 }
1388 }
1389
1390 #ifndef SAFE_NO_RNG
1391 #define SAFE_RNG_MAXWAIT 1000
1392
1393 static void
1394 safe_rng_init(struct safe_softc *sc)
1395 {
1396 u_int32_t w, v;
1397 int i;
1398
1399 WRITE_REG(sc, SAFE_RNG_CTRL, 0);
1400 /* use default value according to the manual */
1401 WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
1402 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1403
1404 /*
1405 * There is a bug in rev 1.0 of the 1140 that when the RNG
1406 * is brought out of reset the ready status flag does not
1407 * work until the RNG has finished its internal initialization.
1408 *
1409 * So to determine that the device is through its
1410 * initialization we must read the data register (honoring the
1411 * status register, in case it already is initialized), then
1412 * read the data register until it changes from the first read.
1413 * Once it changes, read the data register until it changes
1414 * again; at that point the RNG is considered initialized.
1415 * This can take between 750ms and 1000ms.
1416 */
1417 i = 0;
1418 w = READ_REG(sc, SAFE_RNG_OUT);
1419 do {
1420 v = READ_REG(sc, SAFE_RNG_OUT);
1421 if (v != w) {
1422 w = v;
1423 break;
1424 }
1425 DELAY(10);
1426 } while (++i < SAFE_RNG_MAXWAIT);
1427
1428 /* Wait until the data changes again */
1429 i = 0;
1430 do {
1431 v = READ_REG(sc, SAFE_RNG_OUT);
1432 if (v != w)
1433 break;
1434 DELAY(10);
1435 } while (++i < SAFE_RNG_MAXWAIT);
1436 }
1437
1438 static __inline void
1439 safe_rng_disable_short_cycle(struct safe_softc *sc)
1440 {
1441 WRITE_REG(sc, SAFE_RNG_CTRL,
1442 READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
1443 }
1444
1445 static __inline void
1446 safe_rng_enable_short_cycle(struct safe_softc *sc)
1447 {
1448 WRITE_REG(sc, SAFE_RNG_CTRL,
1449 READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
1450 }
1451
1452 static __inline u_int32_t
1453 safe_rng_read(struct safe_softc *sc)
1454 {
1455 int i;
1456
1457 i = 0;
1458 while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
1459 ;
1460 return READ_REG(sc, SAFE_RNG_OUT);
1461 }
1462
1463 static void
1464 safe_rng(void *arg)
1465 {
1466 struct safe_softc *sc = arg;
1467 u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */
1468 u_int maxwords;
1469 int i;
1470
1471 safestats.st_rng++;
1472 /*
1473 * Fetch the next block of data.
1474 */
1475 maxwords = safe_rngbufsize;
1476 if (maxwords > SAFE_RNG_MAXBUFSIZ)
1477 maxwords = SAFE_RNG_MAXBUFSIZ;
1478 retry:
1479 for (i = 0; i < maxwords; i++)
1480 buf[i] = safe_rng_read(sc);
1481 /*
1482 * Check the comparator alarm count and reset the h/w if
1483 * it exceeds our threshold. This guards against the
1484 * hardware oscillators resonating with external signals.
1485 */
1486 if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
1487 u_int32_t freq_inc, w;
1488
1489 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
1490 READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
1491 safestats.st_rngalarm++;
1492 safe_rng_enable_short_cycle(sc);
1493 freq_inc = 18;
1494 for (i = 0; i < 64; i++) {
1495 w = READ_REG(sc, SAFE_RNG_CNFG);
1496 freq_inc = ((w + freq_inc) & 0x3fL);
1497 w = ((w & ~0x3fL) | freq_inc);
1498 WRITE_REG(sc, SAFE_RNG_CNFG, w);
1499
1500 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1501
1502 (void) safe_rng_read(sc);
1503 DELAY(25);
1504
1505 if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
1506 safe_rng_disable_short_cycle(sc);
1507 goto retry;
1508 }
1509 freq_inc = 1;
1510 }
1511 safe_rng_disable_short_cycle(sc);
1512 } else
1513 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1514
1515 (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
1516 callout_reset(&sc->sc_rngto,
1517 hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
1518 }
1519 #endif /* SAFE_NO_RNG */
1520
1521 static void
1522 safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1523 {
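	/*
	 * NB: every tag used with this callback is created with
	 * nsegments = 1, so segs[0] is the whole story; the error
	 * argument is ignored because callers check the return value
	 * of bus_dmamap_load() itself.
	 */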
1524 bus_addr_t *paddr = (bus_addr_t*) arg;
1525 *paddr = segs->ds_addr;
1526 }
1527
1528 static int
1529 safe_dma_malloc(
1530 struct safe_softc *sc,
1531 bus_size_t size,
1532 struct safe_dma_alloc *dma,
1533 int mapflags
1534 )
1535 {
1536 int r;
1537
1538 r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
1539 sizeof(u_int32_t), 0, /* alignment, bounds */
1540 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1541 BUS_SPACE_MAXADDR, /* highaddr */
1542 NULL, NULL, /* filter, filterarg */
1543 size, /* maxsize */
1544 1, /* nsegments */
1545 size, /* maxsegsize */
1546 BUS_DMA_ALLOCNOW, /* flags */
1547 NULL, NULL, /* locking */
1548 &dma->dma_tag);
1549 if (r != 0) {
1550 device_printf(sc->sc_dev, "safe_dma_malloc: "
1551 "bus_dma_tag_create failed; error %u\n", r);
1552 goto fail_0;
1553 }
1554
1555 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1556 BUS_DMA_NOWAIT, &dma->dma_map);
1557 if (r != 0) {
1558 device_printf(sc->sc_dev, "safe_dma_malloc: "
1559 "bus_dmammem_alloc failed; size %ju, error %u\n",
1560 (uintmax_t)size, r);
1561 goto fail_1;
1562 }
1563
1564 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1565 size,
1566 safe_dmamap_cb,
1567 &dma->dma_paddr,
1568 mapflags | BUS_DMA_NOWAIT);
1569 if (r != 0) {
1570 device_printf(sc->sc_dev, "safe_dma_malloc: "
1571 "bus_dmamap_load failed; error %u\n", r);
1572 goto fail_2;
1573 }
1574
1575 dma->dma_size = size;
1576 return (0);
1577
1578 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1579 fail_2:
1580 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1581 fail_1:
1582 bus_dma_tag_destroy(dma->dma_tag);
1583 fail_0:
1584 dma->dma_tag = NULL;
1585 return (r);
1586 }
1587
1588 static void
1589 safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
1590 {
1591 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1592 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1593 bus_dma_tag_destroy(dma->dma_tag);
1594 }
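
/*
 * Usage sketch (hypothetical) for the two helpers above: allocate a
 * DMA-able scratch area and release it again.  "sc" is the driver
 * softc used throughout this file; the size is arbitrary.
 */
#if 0
static int
example_scratch(struct safe_softc *sc)
{
	struct safe_dma_alloc scratch;

	if (safe_dma_malloc(sc, PAGE_SIZE, &scratch, 0) != 0)
		return (ENOMEM);
	/* dma_vaddr is the KVA, dma_paddr the bus address of the area */
	bzero(scratch.dma_vaddr, PAGE_SIZE);
	safe_dma_free(sc, &scratch);
	return (0);
}
#endif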
1595
1596 /*
1597 * Resets the board. Values in the registers are left as-is
1598 * from the reset (i.e. initial values are assigned elsewhere).
1599 */
1600 static void
1601 safe_reset_board(struct safe_softc *sc)
1602 {
1603 u_int32_t v;
1604 /*
1605 * Reset the device. The manual says no delay
1606 * is needed between marking and clearing reset.
1607 */
1608 v = READ_REG(sc, SAFE_PE_DMACFG) &~
1609 (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
1610 SAFE_PE_DMACFG_SGRESET);
1611 WRITE_REG(sc, SAFE_PE_DMACFG, v
1612 | SAFE_PE_DMACFG_PERESET
1613 | SAFE_PE_DMACFG_PDRRESET
1614 | SAFE_PE_DMACFG_SGRESET);
1615 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1616 }
1617
1618 /*
1619 * Initialize registers we need to touch only once.
1620 */
1621 static void
1622 safe_init_board(struct safe_softc *sc)
1623 {
1624 u_int32_t v, dwords;
1625
1626 v = READ_REG(sc, SAFE_PE_DMACFG);
1627 v &=~ SAFE_PE_DMACFG_PEMODE;
1628 v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
1629 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
1630 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
1631 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
1632 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
1633 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
1634 ;
1635 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1636 #if 0
1637 /* XXX select byte swap based on host byte order */
1638 WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
1639 #endif
1640 if (sc->sc_chiprev == SAFE_REV(1,0)) {
1641 /*
1642 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
1643 * "target mode transfers" done while the chip is DMA'ing
1644 * >1020 bytes cause the hardware to lockup. To avoid this
1645 * we reduce the max PCI transfer size and use small source
1646 * particle descriptors (<= 256 bytes).
1647 */
1648 WRITE_REG(sc, SAFE_DMA_CFG, 256);
1649 device_printf(sc->sc_dev,
1650 "Reduce max DMA size to %u words for rev %u.%u WAR\n",
1651 (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
1652 SAFE_REV_MAJ(sc->sc_chiprev),
1653 SAFE_REV_MIN(sc->sc_chiprev));
1654 }
1655
1656 /* NB: operands+results are overlaid */
1657 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
1658 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
1659 /*
1660 * Configure ring entry size and number of items in the ring.
1661 */
1662 KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
1663 ("PE ring entry not 32-bit aligned!"));
1664 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
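	/*
	 * E.g. (hypothetical size): a 160-byte ring entry gives
	 * dwords = 40, so the write below encodes
	 * (40 << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE.
	 */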
1665 WRITE_REG(sc, SAFE_PE_RINGCFG,
1666 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
1667 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
1668
1669 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
1670 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
1671 WRITE_REG(sc, SAFE_PE_PARTSIZE,
1672 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
1673 /*
1674 * NB: destination particles are fixed size. We use
1675 * an mbuf cluster and require all results go to
1676 * clusters or smaller.
1677 */
1678 WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);
1679
1680 /* it's now safe to enable PE mode, do it */
1681 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
1682
1683 /*
1684 * Configure hardware to use level-triggered interrupts and
1685 * to interrupt after each descriptor is processed.
1686 */
1687 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
1688 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
1689 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
1690 }
1691
1692 /*
1693 * Init PCI registers
1694 */
1695 static void
1696 safe_init_pciregs(device_t dev)
1697 {
1698 }
1699
1700 /*
1701 * Clean up after a chip crash.
1702 * It is assumed that the caller is in splimp().
1703 */
1704 static void
1705 safe_cleanchip(struct safe_softc *sc)
1706 {
1707
1708 if (sc->sc_nqchip != 0) {
1709 struct safe_ringentry *re = sc->sc_back;
1710
1711 while (re != sc->sc_front) {
1712 if (re->re_desc.d_csr != 0)
1713 safe_free_entry(sc, re);
1714 if (++re == sc->sc_ringtop)
1715 re = sc->sc_ring;
1716 }
1717 sc->sc_back = re;
1718 sc->sc_nqchip = 0;
1719 }
1720 }
1721
1722 /*
1723 * free a safe_q
1724 * It is assumed that the caller is within splimp().
1725 */
1726 static int
1727 safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
1728 {
1729 struct cryptop *crp;
1730
1731 /*
1732 * Free the allocated destination mbuf chain, if any.
1733 */
1734 if (re->re_dst_m != NULL)
1735 m_freem(re->re_dst_m);
1736
1737 crp = (struct cryptop *)re->re_crp;
1738
1739 re->re_desc.d_csr = 0;
1740
1741 crp->crp_etype = EFAULT;
1742 crypto_done(crp);
1743 return(0);
1744 }
1745
1746 /*
1747 * Routine to reset the chip and clean up.
1748 * It is assumed that the caller is in splimp()
1749 */
1750 static void
1751 safe_totalreset(struct safe_softc *sc)
1752 {
1753 safe_reset_board(sc);
1754 safe_init_board(sc);
1755 safe_cleanchip(sc);
1756 }
1757
1758 /*
1759 * Is the operand suitably aligned for direct DMA? Each
1760 * segment must be aligned on a 32-bit boundary and all
1761 * but the last segment must be a multiple of 4 bytes.
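* E.g. segments (addr 0x1000, len 0x40) then (addr 0x2004, len 6)
* pass, since only the final segment may have a length that is not
* a multiple of 4; in the opposite order they fail.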
1762 */
1763 static int
1764 safe_dmamap_aligned(const struct safe_operand *op)
1765 {
1766 int i;
1767
1768 for (i = 0; i < op->nsegs; i++) {
1769 if (op->segs[i].ds_addr & 3)
1770 return (0);
1771 if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
1772 return (0);
1773 }
1774 return (1);
1775 }
1776
1777 /*
1778 * Is the operand suitable for direct DMA as the destination
1779 * of an operation. The hardware requires that each ``particle''
1780 * but the last in an operation result have the same size. We
1781 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
1782 * 0 if some segment is not a multiple of this size, 1 if all
1783 * segments are exactly this size, or 2 if segments are at worst
1784 * a multiple of this size.
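* E.g. non-final segments of exactly SAFE_MAX_DSIZE bytes yield 1;
* a non-final segment of 2*SAFE_MAX_DSIZE yields 2; any non-final
* segment that is not a multiple yields 0 (the last segment's size
* is unconstrained).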
1785 */
1786 static int
1787 safe_dmamap_uniform(const struct safe_operand *op)
1788 {
1789 int result = 1;
1790
1791 if (op->nsegs > 0) {
1792 int i;
1793
1794 for (i = 0; i < op->nsegs-1; i++) {
1795 if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
1796 return (0);
1797 if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
1798 result = 2;
1799 }
1800 }
1801 return (result);
1802 }
1803
1804 #ifdef SAFE_DEBUG
1805 static void
1806 safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
1807 {
1808 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
1809 , tag
1810 , READ_REG(sc, SAFE_DMA_ENDIAN)
1811 , READ_REG(sc, SAFE_DMA_SRCADDR)
1812 , READ_REG(sc, SAFE_DMA_DSTADDR)
1813 , READ_REG(sc, SAFE_DMA_STAT)
1814 );
1815 }
1816
1817 static void
1818 safe_dump_intrstate(struct safe_softc *sc, const char *tag)
1819 {
1820 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
1821 , tag
1822 , READ_REG(sc, SAFE_HI_CFG)
1823 , READ_REG(sc, SAFE_HI_MASK)
1824 , READ_REG(sc, SAFE_HI_DESC_CNT)
1825 , READ_REG(sc, SAFE_HU_STAT)
1826 , READ_REG(sc, SAFE_HM_STAT)
1827 );
1828 }
1829
1830 static void
1831 safe_dump_ringstate(struct safe_softc *sc, const char *tag)
1832 {
1833 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
1834
1835 /* NB: assume caller has lock on ring */
1836 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
1837 tag,
1838 estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
1839 (unsigned long)(sc->sc_back - sc->sc_ring),
1840 (unsigned long)(sc->sc_front - sc->sc_ring));
1841 }
1842
1843 static void
1844 safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
1845 {
1846 int ix, nsegs;
1847
1848 ix = re - sc->sc_ring;
1849 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
1850 , tag
1851 , re, ix
1852 , re->re_desc.d_csr
1853 , re->re_desc.d_src
1854 , re->re_desc.d_dst
1855 , re->re_desc.d_sa
1856 , re->re_desc.d_len
1857 );
1858 if (re->re_src.nsegs > 1) {
1859 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
1860 sizeof(struct safe_pdesc);
1861 for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
1862 printf(" spd[%u] %p: %p size %u flags %x"
1863 , ix, &sc->sc_spring[ix]
1864 , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
1865 , sc->sc_spring[ix].pd_size
1866 , sc->sc_spring[ix].pd_flags
1867 );
1868 if (sc->sc_spring[ix].pd_size == 0)
1869 printf(" (zero!)");
1870 printf("\n");
1871 if (++ix == SAFE_TOTAL_SPART)
1872 ix = 0;
1873 }
1874 }
1875 if (re->re_dst.nsegs > 1) {
1876 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
1877 sizeof(struct safe_pdesc);
1878 for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
1879 printf(" dpd[%u] %p: %p flags %x\n"
1880 , ix, &sc->sc_dpring[ix]
1881 , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
1882 , sc->sc_dpring[ix].pd_flags
1883 );
1884 if (++ix == SAFE_TOTAL_DPART)
1885 ix = 0;
1886 }
1887 }
1888 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
1889 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
1890 printf("sa: key %x %x %x %x %x %x %x %x\n"
1891 , re->re_sa.sa_key[0]
1892 , re->re_sa.sa_key[1]
1893 , re->re_sa.sa_key[2]
1894 , re->re_sa.sa_key[3]
1895 , re->re_sa.sa_key[4]
1896 , re->re_sa.sa_key[5]
1897 , re->re_sa.sa_key[6]
1898 , re->re_sa.sa_key[7]
1899 );
1900 printf("sa: indigest %x %x %x %x %x\n"
1901 , re->re_sa.sa_indigest[0]
1902 , re->re_sa.sa_indigest[1]
1903 , re->re_sa.sa_indigest[2]
1904 , re->re_sa.sa_indigest[3]
1905 , re->re_sa.sa_indigest[4]
1906 );
1907 printf("sa: outdigest %x %x %x %x %x\n"
1908 , re->re_sa.sa_outdigest[0]
1909 , re->re_sa.sa_outdigest[1]
1910 , re->re_sa.sa_outdigest[2]
1911 , re->re_sa.sa_outdigest[3]
1912 , re->re_sa.sa_outdigest[4]
1913 );
1914 printf("sr: iv %x %x %x %x\n"
1915 , re->re_sastate.sa_saved_iv[0]
1916 , re->re_sastate.sa_saved_iv[1]
1917 , re->re_sastate.sa_saved_iv[2]
1918 , re->re_sastate.sa_saved_iv[3]
1919 );
1920 printf("sr: hashbc %u indigest %x %x %x %x %x\n"
1921 , re->re_sastate.sa_saved_hashbc
1922 , re->re_sastate.sa_saved_indigest[0]
1923 , re->re_sastate.sa_saved_indigest[1]
1924 , re->re_sastate.sa_saved_indigest[2]
1925 , re->re_sastate.sa_saved_indigest[3]
1926 , re->re_sastate.sa_saved_indigest[4]
1927 );
1928 }
1929
1930 static void
1931 safe_dump_ring(struct safe_softc *sc, const char *tag)
1932 {
1933 mtx_lock(&sc->sc_ringmtx);
1934 printf("\nSafeNet Ring State:\n");
1935 safe_dump_intrstate(sc, tag);
1936 safe_dump_dmastatus(sc, tag);
1937 safe_dump_ringstate(sc, tag);
1938 if (sc->sc_nqchip) {
1939 struct safe_ringentry *re = sc->sc_back;
1940 do {
1941 safe_dump_request(sc, tag, re);
1942 if (++re == sc->sc_ringtop)
1943 re = sc->sc_ring;
1944 } while (re != sc->sc_front);
1945 }
1946 mtx_unlock(&sc->sc_ringmtx);
1947 }
1948
1949 static int
1950 sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
1951 {
1952 char dmode[64];
1953 int error;
1954
1955 strncpy(dmode, "", sizeof(dmode) - 1);
1956 dmode[sizeof(dmode) - 1] = '\0';
1957 error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);
1958
1959 if (error == 0 && req->newptr != NULL) {
1960 struct safe_softc *sc = safec;
1961
1962 if (!sc)
1963 return EINVAL;
1964 if (strncmp(dmode, "dma", 3) == 0)
1965 safe_dump_dmastatus(sc, "safe0");
1966 else if (strncmp(dmode, "int", 3) == 0)
1967 safe_dump_intrstate(sc, "safe0");
1968 else if (strncmp(dmode, "ring", 4) == 0)
1969 safe_dump_ring(sc, "safe0");
1970 else
1971 return EINVAL;
1972 }
1973 return error;
1974 }
1975 SYSCTL_PROC(_hw_safe, OID_AUTO, dump,
1976 CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_NEEDGIANT, 0, 0,
1977 sysctl_hw_safe_dump, "A",
1978 "Dump driver state");
1979 #endif /* SAFE_DEBUG */