sys/dev/safe/safe.c
/*-
 * Copyright (c) 2003 Sam Leffler, Errno Consulting
 * Copyright (c) 2003 Global Technology Associates, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: releng/9.2/sys/dev/safe/safe.c 254364 2013-08-15 12:19:16Z scottl $");

/*
 * SafeNet SafeXcel-1141 hardware crypto accelerator
 */
#include "opt_safe.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/mbuf.h>
#include <sys/module.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/bus.h>
#include <sys/rman.h>

#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/cryptosoft.h>
#include <sys/md5.h>
#include <sys/random.h>
#include <sys/kobj.h>

#include "cryptodev_if.h"

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#ifdef SAFE_RNDTEST
#include <dev/rndtest/rndtest.h>
#endif
#include <dev/safe/safereg.h>
#include <dev/safe/safevar.h>

#ifndef bswap32
#define	bswap32	NTOHL
#endif

/*
 * Prototypes and count for the pci_device structure
 */
static	int safe_probe(device_t);
static	int safe_attach(device_t);
static	int safe_detach(device_t);
static	int safe_suspend(device_t);
static	int safe_resume(device_t);
static	int safe_shutdown(device_t);

static	int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
static	int safe_freesession(device_t, u_int64_t);
static	int safe_process(device_t, struct cryptop *, int);

static device_method_t safe_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safe_probe),
	DEVMETHOD(device_attach,	safe_attach),
	DEVMETHOD(device_detach,	safe_detach),
	DEVMETHOD(device_suspend,	safe_suspend),
	DEVMETHOD(device_resume,	safe_resume),
	DEVMETHOD(device_shutdown,	safe_shutdown),

	/* crypto device methods */
	DEVMETHOD(cryptodev_newsession,	safe_newsession),
	DEVMETHOD(cryptodev_freesession,safe_freesession),
	DEVMETHOD(cryptodev_process,	safe_process),

	DEVMETHOD_END
};
static driver_t safe_driver = {
	"safe",
	safe_methods,
	sizeof (struct safe_softc)
};
static devclass_t safe_devclass;

DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
MODULE_DEPEND(safe, crypto, 1, 1, 1);
#ifdef SAFE_RNDTEST
MODULE_DEPEND(safe, rndtest, 1, 1, 1);
#endif

static	void safe_intr(void *);
static	void safe_callback(struct safe_softc *, struct safe_ringentry *);
static	void safe_feed(struct safe_softc *, struct safe_ringentry *);
static	void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
#ifndef SAFE_NO_RNG
static	void safe_rng_init(struct safe_softc *);
static	void safe_rng(void *);
#endif /* SAFE_NO_RNG */
static	int safe_dma_malloc(struct safe_softc *, bus_size_t,
		struct safe_dma_alloc *, int);
#define	safe_dma_sync(_dma, _flags) \
	bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
static	void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
static	int safe_dmamap_aligned(const struct safe_operand *);
static	int safe_dmamap_uniform(const struct safe_operand *);

static	void safe_reset_board(struct safe_softc *);
static	void safe_init_board(struct safe_softc *);
static	void safe_init_pciregs(device_t dev);
static	void safe_cleanchip(struct safe_softc *);
static	void safe_totalreset(struct safe_softc *);

static	int safe_free_entry(struct safe_softc *, struct safe_ringentry *);

static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0,
    "SafeNet driver parameters");

#ifdef SAFE_DEBUG
static	void safe_dump_dmastatus(struct safe_softc *, const char *);
static	void safe_dump_ringstate(struct safe_softc *, const char *);
static	void safe_dump_intrstate(struct safe_softc *, const char *);
static	void safe_dump_request(struct safe_softc *, const char *,
		struct safe_ringentry *);

static	struct safe_softc *safec;	/* for use by hw.safe.dump */

static	int safe_debug = 0;
SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
    0, "control debugging msgs");
#define	DPRINTF(_x)	if (safe_debug) printf _x
#else
#define	DPRINTF(_x)
#endif

#define	READ_REG(sc,r) \
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))

#define	WRITE_REG(sc,reg,val) \
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)

struct safe_stats safestats;
SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
    safe_stats, "driver statistics");
#ifndef SAFE_NO_RNG
static	int safe_rnginterval = 1;	/* poll once a second */
SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
    0, "RNG polling interval (secs)");
static	int safe_rngbufsize = 16;	/* 64 bytes each poll */
SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
    0, "RNG polling buffer size (32-bit words)");
static	int safe_rngmaxalarm = 8;	/* max alarms before reset */
SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
    0, "RNG max alarms before reset");
#endif /* SAFE_NO_RNG */

static int
safe_probe(device_t dev)
{
	if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
	    pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
		return (BUS_PROBE_DEFAULT);
	return (ENXIO);
}

static const char*
safe_partname(struct safe_softc *sc)
{
	/* XXX sprintf numbers when not decoded */
	switch (pci_get_vendor(sc->sc_dev)) {
	case PCI_VENDOR_SAFENET:
		switch (pci_get_device(sc->sc_dev)) {
		case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
		}
		return "SafeNet unknown-part";
	}
	return "Unknown-vendor unknown-part";
}

#ifndef SAFE_NO_RNG
static void
default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
{
	random_harvest(buf, count, count*NBBY, 0, RANDOM_PURE);
}
#endif /* SAFE_NO_RNG */

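/*
 * Attach the device: map the register BAR, wire up the (shareable)
 * interrupt, register with the opencrypto framework, create DMA tags
 * for the source and destination operands, and carve out the packet
 * engine descriptor ring plus the scatter/gather particle rings.
 * Algorithms are registered according to the capabilities advertised
 * in the DEVINFO register.
 */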
static int
safe_attach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);
	u_int32_t raddr;
	u_int32_t i, devinfo;
	int rid;

	bzero(sc, sizeof (*sc));
	sc->sc_dev = dev;

	/* XXX handle power management */

	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_st = rman_get_bustag(sc->sc_sr);
	sc->sc_sh = rman_get_bushandle(sc->sc_sr);

	/*
	 * Arrange interrupt line.
	 */
	rid = 0;
	sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	/*
	 * NB: Network code assumes we are blocked with splimp()
	 *     so make sure the IRQ is mapped appropriately.
	 */
	if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, safe_intr, sc, &sc->sc_ih)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->sc_cid < 0) {
		device_printf(dev, "could not get crypto driver id\n");
		goto bad3;
	}

	sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
		(SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);

	/*
	 * Setup DMA descriptor area.
	 */
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_DMA_BOUNDARY,	/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_SSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_srcdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}
	if (bus_dma_tag_create(bus_get_dma_tag(dev),	/* parent */
			       1,			/* alignment */
			       SAFE_MAX_DSIZE,		/* boundary */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       SAFE_MAX_DMA,		/* maxsize */
			       SAFE_MAX_PART,		/* nsegments */
			       SAFE_MAX_DSIZE,		/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &sc->sc_dstdmat)) {
		device_printf(dev, "cannot allocate DMA tag\n");
		goto bad4;
	}

	/*
	 * Allocate packet engine descriptors.
	 */
	if (safe_dma_malloc(sc,
	    SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
	    &sc->sc_ringalloc, 0)) {
		device_printf(dev, "cannot allocate PE descriptor ring\n");
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	/*
	 * Hookup the static portion of all our data structures.
	 */
	sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
	sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
	sc->sc_front = sc->sc_ring;
	sc->sc_back = sc->sc_ring;
	raddr = sc->sc_ringalloc.dma_paddr;
	bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
	for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
		struct safe_ringentry *re = &sc->sc_ring[i];

		re->re_desc.d_sa = raddr +
			offsetof(struct safe_ringentry, re_sa);
		re->re_sa.sa_staterec = raddr +
			offsetof(struct safe_ringentry, re_sastate);

		raddr += sizeof (struct safe_ringentry);
	}
	mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
		"packet engine ring", MTX_DEF);

	/*
	 * Allocate scatter and gather particle descriptors.
	 */
	if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
	    &sc->sc_spalloc, 0)) {
		device_printf(dev, "cannot allocate source particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_srcdmat);
		goto bad4;
	}
	sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
	sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
	sc->sc_spfree = sc->sc_spring;
	bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));

	if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
	    &sc->sc_dpalloc, 0)) {
		device_printf(dev, "cannot allocate destination particle "
			"descriptor ring\n");
		mtx_destroy(&sc->sc_ringmtx);
		safe_dma_free(sc, &sc->sc_spalloc);
		safe_dma_free(sc, &sc->sc_ringalloc);
		bus_dma_tag_destroy(sc->sc_dstdmat);
		goto bad4;
	}
	sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
	sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
	sc->sc_dpfree = sc->sc_dpring;
	bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));

	device_printf(sc->sc_dev, "%s", safe_partname(sc));

	devinfo = READ_REG(sc, SAFE_DEVINFO);
	if (devinfo & SAFE_DEVINFO_RNG) {
		sc->sc_flags |= SAFE_FLAGS_RNG;
		printf(" rng");
	}
	if (devinfo & SAFE_DEVINFO_PKEY) {
#if 0
		printf(" key");
		sc->sc_flags |= SAFE_FLAGS_KEY;
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
		crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
	}
	if (devinfo & SAFE_DEVINFO_DES) {
		printf(" des/3des");
		crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
		crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_AES) {
		printf(" aes");
		crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_MD5) {
		printf(" md5");
		crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
	}
	if (devinfo & SAFE_DEVINFO_SHA1) {
		printf(" sha1");
		crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
	}
	printf(" null");
	crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
	crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
	/* XXX other supported algorithms */
	printf("\n");

	safe_reset_board(sc);		/* reset h/w */
	safe_init_pciregs(dev);		/* init pci settings */
	safe_init_board(sc);		/* init h/w */

#ifndef SAFE_NO_RNG
	if (sc->sc_flags & SAFE_FLAGS_RNG) {
#ifdef SAFE_RNDTEST
		sc->sc_rndtest = rndtest_attach(dev);
		if (sc->sc_rndtest)
			sc->sc_harvest = rndtest_harvest;
		else
			sc->sc_harvest = default_harvest;
#else
		sc->sc_harvest = default_harvest;
#endif
		safe_rng_init(sc);

		callout_init(&sc->sc_rngto, CALLOUT_MPSAFE);
		callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
	}
#endif /* SAFE_NO_RNG */
#ifdef SAFE_DEBUG
	safec = sc;			/* for use by hw.safe.dump */
#endif
	return (0);
bad4:
	crypto_unregister_all(sc->sc_cid);
bad3:
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
bad:
	return (ENXIO);
}

/*
 * Detach a device that successfully probed.
 */
static int
safe_detach(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

	/* XXX wait/abort active ops */

	WRITE_REG(sc, SAFE_HI_MASK, 0);		/* disable interrupts */

	callout_stop(&sc->sc_rngto);

	crypto_unregister_all(sc->sc_cid);

#ifdef SAFE_RNDTEST
	if (sc->sc_rndtest)
		rndtest_detach(sc->sc_rndtest);
#endif

	safe_cleanchip(sc);
	safe_dma_free(sc, &sc->sc_dpalloc);
	safe_dma_free(sc, &sc->sc_spalloc);
	mtx_destroy(&sc->sc_ringmtx);
	safe_dma_free(sc, &sc->sc_ringalloc);

	bus_generic_detach(dev);
	bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
	bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);

	bus_dma_tag_destroy(sc->sc_srcdmat);
	bus_dma_tag_destroy(sc->sc_dstdmat);
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);

	return (0);
}

/*
 * Stop all chip i/o so that the kernel's probe routines don't
 * get confused by errant DMAs when rebooting.
 */
static int
safe_shutdown(device_t dev)
{
#ifdef notyet
	safe_stop(device_get_softc(dev));
#endif
	return (0);
}

/*
 * Device suspend routine.
 */
static int
safe_suspend(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX stop the device and save PCI settings */
#endif
	sc->sc_suspended = 1;

	return (0);
}

static int
safe_resume(device_t dev)
{
	struct safe_softc *sc = device_get_softc(dev);

#ifdef notyet
	/* XXX restore PCI settings and start the device */
#endif
	sc->sc_suspended = 0;
	return (0);
}

/*
 * SafeXcel interrupt routine
 */
static void
safe_intr(void *arg)
{
	struct safe_softc *sc = arg;
	volatile u_int32_t stat;

	stat = READ_REG(sc, SAFE_HM_STAT);
	if (stat == 0)			/* shared irq, not for us */
		return;

	WRITE_REG(sc, SAFE_HI_CLR, stat);	/* IACK */

	if ((stat & SAFE_INT_PE_DDONE)) {
		/*
		 * Descriptor(s) done; scan the ring and
		 * process completed operations.
		 */
		mtx_lock(&sc->sc_ringmtx);
		while (sc->sc_back != sc->sc_front) {
			struct safe_ringentry *re = sc->sc_back;
#ifdef SAFE_DEBUG
			if (safe_debug) {
				safe_dump_ringstate(sc, __func__);
				safe_dump_request(sc, __func__, re);
			}
#endif
			/*
			 * safe_process marks ring entries that were allocated
			 * but not used with a csr of zero.  This ensures the
			 * ring front pointer never needs to be set backwards
			 * in the event that an entry is allocated but not used
			 * because of a setup error.
			 */
			if (re->re_desc.d_csr != 0) {
				if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
					break;
				if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
					break;
				sc->sc_nqchip--;
				safe_callback(sc, re);
			}
			if (++(sc->sc_back) == sc->sc_ringtop)
				sc->sc_back = sc->sc_ring;
		}
		mtx_unlock(&sc->sc_ringmtx);
	}

	/*
	 * Check to see if we got any DMA Error
	 */
	if (stat & SAFE_INT_PE_ERROR) {
		DPRINTF(("dmaerr dmastat %08x\n",
			READ_REG(sc, SAFE_PE_DMASTAT)));
		safestats.st_dmaerr++;
		safe_totalreset(sc);
#if 0
		safe_feed(sc);
#endif
	}

	if (sc->sc_needwakeup) {	/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
		DPRINTF(("%s: wakeup crypto %x\n", __func__,
			sc->sc_needwakeup));
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}

/*
 * safe_feed() - post a request to chip
 */
static void
safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
{
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
	if (re->re_dst_map != NULL)
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
			BUS_DMASYNC_PREREAD);
	/* XXX have no smaller granularity */
	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
	safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);

#ifdef SAFE_DEBUG
	if (safe_debug) {
		safe_dump_ringstate(sc, __func__);
		safe_dump_request(sc, __func__, re);
	}
#endif
	sc->sc_nqchip++;
	if (sc->sc_nqchip > safestats.st_maxqchip)
		safestats.st_maxqchip = sc->sc_nqchip;
	/* poke h/w to check descriptor ring, any value can be written */
	WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
}

#define	N(a)	(sizeof(a) / sizeof (a[0]))
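/*
 * Copy the cipher key into the session state.  The key is stored as
 * little-endian 32-bit words since that is how the packet engine
 * consumes it.
 */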
static void
safe_setup_enckey(struct safe_session *ses, caddr_t key)
{
	int i;

	bcopy(key, ses->ses_key, ses->ses_klen / 8);

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_key); i++)
		ses->ses_key[i] = htole32(ses->ses_key[i]);
}

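/*
 * Precompute the HMAC inner and outer digest state for the session:
 * hash key^ipad and key^opad (per RFC 2104) and save the intermediate
 * states so the hardware only has to process the actual payload.
 * Note the key is XOR-modified in place and restored by the final
 * XOR pass before returning.
 */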
static void
safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
{
	MD5_CTX md5ctx;
	SHA1_CTX sha1ctx;
	int i;

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_IPAD_VAL;

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_ipad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);

	if (algo == CRYPTO_MD5_HMAC) {
		MD5Init(&md5ctx);
		MD5Update(&md5ctx, key, klen);
		MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
		bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));
	} else {
		SHA1Init(&sha1ctx);
		SHA1Update(&sha1ctx, key, klen);
		SHA1Update(&sha1ctx, hmac_opad_buffer,
		    SHA1_HMAC_BLOCK_LEN - klen);
		bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
	}

	for (i = 0; i < klen; i++)
		key[i] ^= HMAC_OPAD_VAL;

	/* PE is little-endian, ensure proper byte order */
	for (i = 0; i < N(ses->ses_hminner); i++) {
		ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
		ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
	}
}
#undef N

/*
 * Allocate a new 'session' and return an encoded session id.  'sidp'
 * contains our registration id, and should contain an encoded session
 * id on successful allocation.
 */
static int
safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
{
	struct safe_softc *sc = device_get_softc(dev);
	struct cryptoini *c, *encini = NULL, *macini = NULL;
	struct safe_session *ses = NULL;
	int sesn;

	if (sidp == NULL || cri == NULL || sc == NULL)
		return (EINVAL);

	for (c = cri; c != NULL; c = c->cri_next) {
		if (c->cri_alg == CRYPTO_MD5_HMAC ||
		    c->cri_alg == CRYPTO_SHA1_HMAC ||
		    c->cri_alg == CRYPTO_NULL_HMAC) {
			if (macini)
				return (EINVAL);
			macini = c;
		} else if (c->cri_alg == CRYPTO_DES_CBC ||
		    c->cri_alg == CRYPTO_3DES_CBC ||
		    c->cri_alg == CRYPTO_AES_CBC ||
		    c->cri_alg == CRYPTO_NULL_CBC) {
			if (encini)
				return (EINVAL);
			encini = c;
		} else
			return (EINVAL);
	}
	if (encini == NULL && macini == NULL)
		return (EINVAL);
	if (encini) {			/* validate key length */
		switch (encini->cri_alg) {
		case CRYPTO_DES_CBC:
			if (encini->cri_klen != 64)
				return (EINVAL);
			break;
		case CRYPTO_3DES_CBC:
			if (encini->cri_klen != 192)
				return (EINVAL);
			break;
		case CRYPTO_AES_CBC:
			if (encini->cri_klen != 128 &&
			    encini->cri_klen != 192 &&
			    encini->cri_klen != 256)
				return (EINVAL);
			break;
		}
	}

	if (sc->sc_sessions == NULL) {
		ses = sc->sc_sessions = (struct safe_session *)malloc(
		    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
		if (ses == NULL)
			return (ENOMEM);
		sesn = 0;
		sc->sc_nsessions = 1;
	} else {
		for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
			if (sc->sc_sessions[sesn].ses_used == 0) {
				ses = &sc->sc_sessions[sesn];
				break;
			}
		}

		if (ses == NULL) {
			sesn = sc->sc_nsessions;
			ses = (struct safe_session *)malloc((sesn + 1) *
			    sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
			if (ses == NULL)
				return (ENOMEM);
			bcopy(sc->sc_sessions, ses, sesn *
			    sizeof(struct safe_session));
			bzero(sc->sc_sessions, sesn *
			    sizeof(struct safe_session));
			free(sc->sc_sessions, M_DEVBUF);
			sc->sc_sessions = ses;
			ses = &sc->sc_sessions[sesn];
			sc->sc_nsessions++;
		}
	}

	bzero(ses, sizeof(struct safe_session));
	ses->ses_used = 1;

	if (encini) {
		/* get an IV */
		/* XXX may read fewer than requested */
		read_random(ses->ses_iv, sizeof(ses->ses_iv));

		ses->ses_klen = encini->cri_klen;
		if (encini->cri_key != NULL)
			safe_setup_enckey(ses, encini->cri_key);
	}

	if (macini) {
		ses->ses_mlen = macini->cri_mlen;
		if (ses->ses_mlen == 0) {
			if (macini->cri_alg == CRYPTO_MD5_HMAC)
				ses->ses_mlen = MD5_HASH_LEN;
			else
				ses->ses_mlen = SHA1_HASH_LEN;
		}

		if (macini->cri_key != NULL) {
			safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
			    macini->cri_klen / 8);
		}
	}

	*sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
	return (0);
}

/*
 * Deallocate a session.
 */
static int
safe_freesession(device_t dev, u_int64_t tid)
{
	struct safe_softc *sc = device_get_softc(dev);
	int session, ret;
	u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;

	if (sc == NULL)
		return (EINVAL);

	session = SAFE_SESSION(sid);
	if (session < sc->sc_nsessions) {
		bzero(&sc->sc_sessions[session],
		    sizeof(sc->sc_sessions[session]));
		ret = 0;
	} else
		ret = EINVAL;
	return (ret);
}

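/*
 * Callback for bus_dmamap_load_mbuf/bus_dmamap_load_uio; records the
 * segment list and total mapped size in the associated safe_operand.
 */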
static void
safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize,
    int error)
{
	struct safe_operand *op = arg;

	DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__,
		(u_int) mapsize, nsegs, error));
	if (error != 0)
		return;
	op->mapsize = mapsize;
	op->nsegs = nsegs;
	bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}

static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safe_softc *sc = device_get_softc(dev);
	int err = 0, i, nicealign, uniform;
	struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
	int bypass, oplen, ivsize;
	caddr_t iv;
	int16_t coffset;
	struct safe_session *ses;
	struct safe_ringentry *re;
	struct safe_sarec *sa;
	struct safe_pdesc *pd;
	u_int32_t cmd0, cmd1, staterec;

	if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
		safestats.st_invalid++;
		return (EINVAL);
	}
	if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
		safestats.st_badsession++;
		return (EINVAL);
	}

	mtx_lock(&sc->sc_ringmtx);
	if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
		safestats.st_ringfull++;
		sc->sc_needwakeup |= CRYPTO_SYMQ;
		mtx_unlock(&sc->sc_ringmtx);
		return (ERESTART);
	}
	re = sc->sc_front;

	staterec = re->re_sa.sa_staterec;	/* save */
	/* NB: zero everything but the PE descriptor */
	bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
	re->re_sa.sa_staterec = staterec;	/* restore */

	re->re_crp = crp;
	re->re_sesn = SAFE_SESSION(crp->crp_sid);

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		re->re_src_m = (struct mbuf *)crp->crp_buf;
		re->re_dst_m = (struct mbuf *)crp->crp_buf;
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		re->re_src_io = (struct uio *)crp->crp_buf;
		re->re_dst_io = (struct uio *)crp->crp_buf;
	} else {
		safestats.st_badflags++;
		err = EINVAL;
		goto errout;	/* XXX we don't handle contiguous blocks! */
	}

	sa = &re->re_sa;
	ses = &sc->sc_sessions[re->re_sesn];

	crd1 = crp->crp_desc;
	if (crd1 == NULL) {
		safestats.st_nodesc++;
		err = EINVAL;
		goto errout;
	}
	crd2 = crd1->crd_next;

	cmd0 = SAFE_SA_CMD0_BASIC;	/* basic group operation */
	cmd1 = 0;
	if (crd2 == NULL) {
		if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) {
			maccrd = crd1;
			enccrd = NULL;
			cmd0 |= SAFE_SA_CMD0_OP_HASH;
		} else if (crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) {
			maccrd = NULL;
			enccrd = crd1;
			cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
	} else {
		if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
		    crd1->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd1->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd2->crd_alg == CRYPTO_DES_CBC ||
		    crd2->crd_alg == CRYPTO_3DES_CBC ||
		    crd2->crd_alg == CRYPTO_AES_CBC ||
		    crd2->crd_alg == CRYPTO_NULL_CBC) &&
		    ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
			maccrd = crd1;
			enccrd = crd2;
		} else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
		    crd1->crd_alg == CRYPTO_3DES_CBC ||
		    crd1->crd_alg == CRYPTO_AES_CBC ||
		    crd1->crd_alg == CRYPTO_NULL_CBC) &&
		    (crd2->crd_alg == CRYPTO_MD5_HMAC ||
		    crd2->crd_alg == CRYPTO_SHA1_HMAC ||
		    crd2->crd_alg == CRYPTO_NULL_HMAC) &&
		    (crd1->crd_flags & CRD_F_ENCRYPT)) {
			enccrd = crd1;
			maccrd = crd2;
		} else {
			safestats.st_badalg++;
			err = EINVAL;
			goto errout;
		}
		cmd0 |= SAFE_SA_CMD0_OP_BOTH;
	}

	if (enccrd) {
		if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
			safe_setup_enckey(ses, enccrd->crd_key);

		if (enccrd->crd_alg == CRYPTO_DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
			cmd0 |= SAFE_SA_CMD0_3DES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			ivsize = 2*sizeof(u_int32_t);
		} else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
			cmd0 |= SAFE_SA_CMD0_AES;
			cmd1 |= SAFE_SA_CMD1_CBC;
			if (ses->ses_klen == 128)
				cmd1 |= SAFE_SA_CMD1_AES128;
			else if (ses->ses_klen == 192)
				cmd1 |= SAFE_SA_CMD1_AES192;
			else
				cmd1 |= SAFE_SA_CMD1_AES256;
			ivsize = 4*sizeof(u_int32_t);
		} else {
			cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
			ivsize = 0;
		}

		/*
		 * Setup encrypt/decrypt state.  When using basic ops
		 * we can't use an inline IV because hash/crypt offset
		 * must be from the end of the IV to the start of the
		 * crypt data and this leaves out the preceding header
		 * from the hash calculation.  Instead we place the IV
		 * in the state record and set the hash/crypt offset to
		 * copy both the header+IV.
		 */
		if (enccrd->crd_flags & CRD_F_ENCRYPT) {
			cmd0 |= SAFE_SA_CMD0_OUTBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
				iv = enccrd->crd_iv;
			else
				iv = (caddr_t) ses->ses_iv;
			if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
				crypto_copyback(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize, iv);
			}
			bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
			re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
		} else {
			cmd0 |= SAFE_SA_CMD0_INBOUND;

			if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
				bcopy(enccrd->crd_iv,
				    re->re_sastate.sa_saved_iv, ivsize);
			} else {
				crypto_copydata(crp->crp_flags, crp->crp_buf,
				    enccrd->crd_inject, ivsize,
				    (caddr_t)re->re_sastate.sa_saved_iv);
			}
			cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
		}
		/*
		 * For basic encryption use the zero pad algorithm.
		 * This pads results to an 8-byte boundary and
		 * suppresses padding verification for inbound (i.e.
		 * decrypt) operations.
		 *
		 * NB: Not sure if the 8-byte pad boundary is a problem.
		 */
		cmd0 |= SAFE_SA_CMD0_PAD_ZERO;

		/* XXX assert key bufs have the same size */
		bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
	}

	if (maccrd) {
		if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
			safe_setup_mackey(ses, maccrd->crd_alg,
			    maccrd->crd_key, maccrd->crd_klen / 8);
		}

		if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
			cmd0 |= SAFE_SA_CMD0_MD5;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
			cmd0 |= SAFE_SA_CMD0_SHA1;
			cmd1 |= SAFE_SA_CMD1_HMAC;	/* NB: enable HMAC */
		} else {
			cmd0 |= SAFE_SA_CMD0_HASH_NULL;
		}
		/*
		 * Digest data is loaded from the SA and the hash
		 * result is saved to the state block where we
		 * retrieve it for return to the caller.
		 */
		/* XXX assert digest bufs have the same size */
		bcopy(ses->ses_hminner, sa->sa_indigest,
		    sizeof(sa->sa_indigest));
		bcopy(ses->ses_hmouter, sa->sa_outdigest,
		    sizeof(sa->sa_outdigest));

		cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
		re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
	}

	if (enccrd && maccrd) {
		/*
		 * The offset from hash data to the start of
		 * crypt data is the difference in the skips.
		 */
		bypass = maccrd->crd_skip;
		coffset = enccrd->crd_skip - maccrd->crd_skip;
		if (coffset < 0) {
			DPRINTF(("%s: hash does not precede crypt; "
				"mac skip %u enc skip %u\n",
				__func__, maccrd->crd_skip, enccrd->crd_skip));
			safestats.st_skipmismatch++;
			err = EINVAL;
			goto errout;
		}
		oplen = enccrd->crd_skip + enccrd->crd_len;
		if (maccrd->crd_skip + maccrd->crd_len != oplen) {
			DPRINTF(("%s: hash amount %u != crypt amount %u\n",
				__func__, maccrd->crd_skip + maccrd->crd_len,
				oplen));
			safestats.st_lenmismatch++;
			err = EINVAL;
			goto errout;
		}
#ifdef SAFE_DEBUG
		if (safe_debug) {
			printf("mac: skip %d, len %d, inject %d\n",
			    maccrd->crd_skip, maccrd->crd_len,
			    maccrd->crd_inject);
			printf("enc: skip %d, len %d, inject %d\n",
			    enccrd->crd_skip, enccrd->crd_len,
			    enccrd->crd_inject);
			printf("bypass %d coffset %d oplen %d\n",
			    bypass, coffset, oplen);
		}
#endif
		if (coffset & 3) {	/* offset must be 32-bit aligned */
			DPRINTF(("%s: coffset %u misaligned\n",
				__func__, coffset));
			safestats.st_coffmisaligned++;
			err = EINVAL;
			goto errout;
		}
		coffset >>= 2;
		if (coffset > 255) {	/* offset must be <256 dwords */
			DPRINTF(("%s: coffset %u too big\n",
				__func__, coffset));
			safestats.st_cofftoobig++;
			err = EINVAL;
			goto errout;
		}
		/*
		 * Tell the hardware to copy the header to the output.
		 * The header is defined as the data from the end of
		 * the bypass to the start of data to be encrypted.
		 * Typically this is the inline IV.  Note that you need
		 * to do this even if src+dst are the same; it appears
		 * that w/o this bit the crypted data is written
		 * immediately after the bypass data.
		 */
		cmd1 |= SAFE_SA_CMD1_HDRCOPY;
		/*
		 * Disable IP header mutable bit handling.  This is
		 * needed to get correct HMAC calculations.
		 */
		cmd1 |= SAFE_SA_CMD1_MUTABLE;
	} else {
		if (enccrd) {
			bypass = enccrd->crd_skip;
			oplen = bypass + enccrd->crd_len;
		} else {
			bypass = maccrd->crd_skip;
			oplen = bypass + maccrd->crd_len;
		}
		coffset = 0;
	}
	/* XXX verify multiple of 4 when using s/g */
	if (bypass > 96) {		/* bypass offset must be <= 96 bytes */
		DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
		safestats.st_bypasstoobig++;
		err = EINVAL;
		goto errout;
	}

	if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
		safestats.st_nomap++;
		err = ENOMEM;
		goto errout;
	}
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_m, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map,
		    re->re_src_io, safe_op_cb,
		    &re->re_src, BUS_DMA_NOWAIT) != 0) {
			bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
			re->re_src_map = NULL;
			safestats.st_noload++;
			err = ENOMEM;
			goto errout;
		}
	}
	nicealign = safe_dmamap_aligned(&re->re_src);
	uniform = safe_dmamap_uniform(&re->re_src);

	DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
		nicealign, uniform, re->re_src.nsegs));
	if (re->re_src.nsegs > 1) {
		re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
			((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
		for (i = 0; i < re->re_src_nsegs; i++) {
			/* NB: no need to check if there's space */
			pd = sc->sc_spfree;
			if (++(sc->sc_spfree) == sc->sc_springtop)
				sc->sc_spfree = sc->sc_spring;

			KASSERT((pd->pd_flags&3) == 0 ||
				(pd->pd_flags&3) == SAFE_PD_DONE,
				("bogus source particle descriptor; flags %x",
				pd->pd_flags));
			pd->pd_addr = re->re_src_segs[i].ds_addr;
			pd->pd_size = re->re_src_segs[i].ds_len;
			pd->pd_flags = SAFE_PD_READY;
		}
		cmd0 |= SAFE_SA_CMD0_IGATHER;
	} else {
		/*
		 * No need for gather, reference the operand directly.
		 */
		re->re_desc.d_src = re->re_src_segs[0].ds_addr;
	}

	if (enccrd == NULL && maccrd != NULL) {
		/*
		 * Hash op; no destination needed.
		 */
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			if (!nicealign) {
				safestats.st_iovmisaligned++;
				err = EINVAL;
				goto errout;
			}
			if (uniform != 1) {
				/*
				 * Source is not suitable for direct use as
				 * the destination.  Create a new scatter/gather
				 * list based on the destination requirements
				 * and check if that's ok.
				 */
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map)) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_uio(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_io,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
					    re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
				uniform = safe_dmamap_uniform(&re->re_dst);
				if (!uniform) {
					/*
					 * There's no way to handle the DMA
					 * requirements with this uio.  We
					 * could create a separate DMA area for
					 * the result and then copy it back,
					 * but for now we just bail and return
					 * an error.  Note that uio requests
					 * > SAFE_MAX_DSIZE are handled because
					 * the DMA map and segment list for the
					 * destination will result in a
					 * destination particle list that does
					 * the necessary scatter DMA.
					 */
					safestats.st_iovnotuniform++;
					err = EINVAL;
					goto errout;
				}
			} else
				re->re_dst = re->re_src;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (nicealign && uniform == 1) {
				/*
				 * Source layout is suitable for direct
				 * sharing of the DMA map and segment list.
				 */
				re->re_dst = re->re_src;
			} else if (nicealign && uniform == 2) {
				/*
				 * The source is properly aligned but requires a
				 * different particle list to handle DMA of the
				 * result.  Create a new map and do the load to
				 * create the segment list.  The particle
				 * descriptor setup code below will handle the
				 * rest.
				 */
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map)) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_m,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
					    re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
			} else {	/* !(aligned and/or uniform) */
				int totlen, len;
				struct mbuf *m, *top, **mp;

				/*
				 * DMA constraints require that we allocate a
				 * new mbuf chain for the destination.  We
				 * allocate an entire new set of mbufs of
				 * optimal/required size and then tell the
				 * hardware to copy any bits that are not
				 * created as a byproduct of the operation.
				 */
				if (!nicealign)
					safestats.st_unaligned++;
				if (!uniform)
					safestats.st_notuniform++;
				totlen = re->re_src_mapsize;
				if (re->re_src_m->m_flags & M_PKTHDR) {
					len = MHLEN;
					MGETHDR(m, M_NOWAIT, MT_DATA);
					if (m && !m_dup_pkthdr(m, re->re_src_m,
					    M_NOWAIT)) {
						m_free(m);
						m = NULL;
					}
				} else {
					len = MLEN;
					MGET(m, M_NOWAIT, MT_DATA);
				}
				if (m == NULL) {
					safestats.st_nombuf++;
					err = sc->sc_nqchip ? ERESTART : ENOMEM;
					goto errout;
				}
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_NOWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						m_free(m);
						safestats.st_nomcl++;
						err = sc->sc_nqchip ?
							ERESTART : ENOMEM;
						goto errout;
					}
					len = MCLBYTES;
				}
				m->m_len = len;
				top = NULL;
				mp = &top;

				while (totlen > 0) {
					if (top) {
						MGET(m, M_NOWAIT, MT_DATA);
						if (m == NULL) {
							m_freem(top);
							safestats.st_nombuf++;
							err = sc->sc_nqchip ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MLEN;
					}
					if (top && totlen >= MINCLSIZE) {
						MCLGET(m, M_NOWAIT);
						if ((m->m_flags & M_EXT) == 0) {
							*mp = m;
							m_freem(top);
							safestats.st_nomcl++;
							err = sc->sc_nqchip ?
							    ERESTART : ENOMEM;
							goto errout;
						}
						len = MCLBYTES;
					}
					m->m_len = len = min(totlen, len);
					totlen -= len;
					*mp = m;
					mp = &m->m_next;
				}
				re->re_dst_m = top;
				if (bus_dmamap_create(sc->sc_dstdmat,
				    BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
					safestats.st_nomap++;
					err = ENOMEM;
					goto errout;
				}
				if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
				    re->re_dst_map, re->re_dst_m,
				    safe_op_cb, &re->re_dst,
				    BUS_DMA_NOWAIT) != 0) {
					bus_dmamap_destroy(sc->sc_dstdmat,
					    re->re_dst_map);
					re->re_dst_map = NULL;
					safestats.st_noload++;
					err = ENOMEM;
					goto errout;
				}
				if (re->re_src.mapsize > oplen) {
					/*
					 * There's data following what the
					 * hardware will copy for us.  If this
					 * isn't just the ICV (that's going to
					 * be written on completion), copy it
					 * to the new mbufs
					 */
					if (!(maccrd &&
					    (re->re_src.mapsize-oplen) == 12 &&
					    maccrd->crd_inject == oplen))
						safe_mcopy(re->re_src_m,
						    re->re_dst_m,
						    oplen);
					else
						safestats.st_noicvcopy++;
				}
			}
		} else {
			safestats.st_badflags++;
			err = EINVAL;
			goto errout;
		}

		if (re->re_dst.nsegs > 1) {
			re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
			    ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
			for (i = 0; i < re->re_dst_nsegs; i++) {
				pd = sc->sc_dpfree;
				KASSERT((pd->pd_flags&3) == 0 ||
					(pd->pd_flags&3) == SAFE_PD_DONE,
					("bogus dest particle descriptor; flags %x",
						pd->pd_flags));
				if (++(sc->sc_dpfree) == sc->sc_dpringtop)
					sc->sc_dpfree = sc->sc_dpring;
				pd->pd_addr = re->re_dst_segs[i].ds_addr;
				pd->pd_flags = SAFE_PD_READY;
			}
			cmd0 |= SAFE_SA_CMD0_OSCATTER;
		} else {
			/*
			 * No need for scatter, reference the operand directly.
			 */
			re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
		}
	}

	/*
	 * All done with setup; fill in the SA command words
	 * and the packet engine descriptor.  The operation
	 * is now ready for submission to the hardware.
	 */
	sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
	sa->sa_cmd1 = cmd1
		    | (coffset << SAFE_SA_CMD1_OFFSET_S)
		    | SAFE_SA_CMD1_SAREV1	/* Rev 1 SA data structure */
		    | SAFE_SA_CMD1_SRPCI
		    ;
	/*
	 * NB: the order of writes is important here.  In case the
	 * chip is scanning the ring because of an outstanding request
	 * it might nab this one too.  In that case we need to make
	 * sure the setup is complete before we write the length
	 * field of the descriptor as it signals the descriptor is
	 * ready for processing.
	 */
	re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
	if (maccrd)
		re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
	re->re_desc.d_len = oplen
			  | SAFE_PE_LEN_READY
			  | (bypass << SAFE_PE_LEN_BYPASS_S)
			  ;

	safestats.st_ipackets++;
	safestats.st_ibytes += oplen;

	if (++(sc->sc_front) == sc->sc_ringtop)
		sc->sc_front = sc->sc_ring;

	/* XXX honor batching */
	safe_feed(sc, re);
	mtx_unlock(&sc->sc_ringmtx);
	return (0);

errout:
	if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
		m_freem(re->re_dst_m);

	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	if (re->re_src_map != NULL) {
		bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
		bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
	}
	mtx_unlock(&sc->sc_ringmtx);
	if (err != ERESTART) {
		crp->crp_etype = err;
		crypto_done(crp);
	} else {
		sc->sc_needwakeup |= CRYPTO_SYMQ;
	}
	return (err);
}

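/*
 * Completion handler, called from the interrupt routine (with the
 * ring mutex held) for each descriptor the chip has finished.  Syncs
 * and unloads the DMA maps, swaps in the destination mbuf chain when
 * a separate one was allocated, copies back the IV and/or ICV as
 * requested, and hands the request back via crypto_done().
 */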
static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
	struct cryptop *crp = (struct cryptop *)re->re_crp;
	struct cryptodesc *crd;

	safestats.st_opackets++;
	safestats.st_obytes += re->re_dst.mapsize;

	safe_dma_sync(&sc->sc_ringalloc,
		BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
		device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
			re->re_desc.d_csr,
			re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
		safestats.st_peoperr++;
		crp->crp_etype = EIO;		/* something more meaningful? */
	}
	if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
		bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
		    BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
		bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
	}
	bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
	bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);

	/*
	 * If result was written to a different mbuf chain, swap
	 * it in as the return value and reclaim the original.
	 */
	if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
		m_freem(re->re_src_m);
		crp->crp_buf = (caddr_t)re->re_dst_m;
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
		/* copy out IV for future use */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int ivsize;

			if (crd->crd_alg == CRYPTO_DES_CBC ||
			    crd->crd_alg == CRYPTO_3DES_CBC) {
				ivsize = 2*sizeof(u_int32_t);
			} else if (crd->crd_alg == CRYPTO_AES_CBC) {
				ivsize = 4*sizeof(u_int32_t);
			} else
				continue;
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    crd->crd_skip + crd->crd_len - ivsize, ivsize,
			    (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
			break;
		}
	}

	if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
		/* copy out ICV result */
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC ||
			    crd->crd_alg == CRYPTO_NULL_HMAC))
				continue;
			if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
				/*
				 * SHA-1 ICV's are byte-swapped; fix 'em up
				 * before copying them to their destination.
				 */
				re->re_sastate.sa_saved_indigest[0] =
				    bswap32(re->re_sastate.sa_saved_indigest[0]);
				re->re_sastate.sa_saved_indigest[1] =
				    bswap32(re->re_sastate.sa_saved_indigest[1]);
				re->re_sastate.sa_saved_indigest[2] =
				    bswap32(re->re_sastate.sa_saved_indigest[2]);
			}
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    crd->crd_inject,
			    sc->sc_sessions[re->re_sesn].ses_mlen,
			    (caddr_t)re->re_sastate.sa_saved_indigest);
			break;
		}
	}
	crypto_done(crp);
}

/*
 * Copy all data past offset from srcm to dstm.
 */
static void
safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
{
	u_int j, dlen, slen;
	caddr_t dptr, sptr;

	/*
	 * Advance src and dst to offset.
	 */
	j = offset;
	while (j >= 0) {
		if (srcm->m_len > j)
			break;
		j -= srcm->m_len;
		srcm = srcm->m_next;
		if (srcm == NULL)
			return;
	}
	sptr = mtod(srcm, caddr_t) + j;
	slen = srcm->m_len - j;

	j = offset;
	while (j >= 0) {
		if (dstm->m_len > j)
			break;
		j -= dstm->m_len;
		dstm = dstm->m_next;
		if (dstm == NULL)
			return;
	}
	dptr = mtod(dstm, caddr_t) + j;
	dlen = dstm->m_len - j;

	/*
	 * Copy everything that remains.
	 */
	for (;;) {
		j = min(slen, dlen);
		bcopy(sptr, dptr, j);
		if (slen == j) {
			srcm = srcm->m_next;
			if (srcm == NULL)
				return;
			sptr = srcm->m_data;
			slen = srcm->m_len;
		} else
			sptr += j, slen -= j;
		if (dlen == j) {
			dstm = dstm->m_next;
			if (dstm == NULL)
				return;
			dptr = dstm->m_data;
			dlen = dstm->m_len;
		} else
			dptr += j, dlen -= j;
	}
}

#ifndef SAFE_NO_RNG
#define	SAFE_RNG_MAXWAIT	1000

static void
safe_rng_init(struct safe_softc *sc)
{
	u_int32_t w, v;
	int i;

	WRITE_REG(sc, SAFE_RNG_CTRL, 0);
	/* use default value according to the manual */
	WRITE_REG(sc, SAFE_RNG_CNFG, 0x834);	/* magic from SafeNet */
	WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	/*
	 * There is a bug in rev 1.0 of the 1140 that when the RNG
	 * is brought out of reset the ready status flag does not
	 * work until the RNG has finished its internal initialization.
	 *
	 * So in order to determine that the device is through its
	 * initialization we must read the data register, using the
	 * status reg in the read in case it is initialized.  Then read
	 * the data register until it changes from the first read.
	 * Once it changes read the data register until it changes
	 * again.  At this time the RNG is considered initialized.
	 * This could take between 750ms - 1000ms in time.
	 */
	i = 0;
	w = READ_REG(sc, SAFE_RNG_OUT);
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w) {
			w = v;
			break;
		}
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);

	/* Wait until the data changes again */
	i = 0;
	do {
		v = READ_REG(sc, SAFE_RNG_OUT);
		if (v != w)
			break;
		DELAY(10);
	} while (++i < SAFE_RNG_MAXWAIT);
}

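/*
 * Helpers to toggle the RNG short-cycle mode used by safe_rng()
 * below when re-tuning the oscillators after an alarm storm.
 */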
static __inline void
safe_rng_disable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
}

static __inline void
safe_rng_enable_short_cycle(struct safe_softc *sc)
{
	WRITE_REG(sc, SAFE_RNG_CTRL,
		READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
}

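/*
 * Read one word from the RNG, spinning briefly until the engine
 * reports ready (bounded by SAFE_RNG_MAXWAIT iterations).
 */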
static __inline u_int32_t
safe_rng_read(struct safe_softc *sc)
{
	int i;

	i = 0;
	while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
		;
	return READ_REG(sc, SAFE_RNG_OUT);
}

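/*
 * Periodic RNG harvester (callout).  Pulls a buffer of words from
 * the engine, re-tunes the oscillators if the alarm counter exceeds
 * the configured threshold, and feeds the data to the harvest hook
 * (rndtest or random_harvest).
 */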
static void
safe_rng(void *arg)
{
	struct safe_softc *sc = arg;
	u_int32_t buf[SAFE_RNG_MAXBUFSIZ];	/* NB: maybe move to softc */
	u_int maxwords;
	int i;

	safestats.st_rng++;
	/*
	 * Fetch the next block of data.
	 */
	maxwords = safe_rngbufsize;
	if (maxwords > SAFE_RNG_MAXBUFSIZ)
		maxwords = SAFE_RNG_MAXBUFSIZ;
retry:
	for (i = 0; i < maxwords; i++)
		buf[i] = safe_rng_read(sc);
	/*
	 * Check the comparator alarm count and reset the h/w if
	 * it exceeds our threshold.  This guards against the
	 * hardware oscillators resonating with external signals.
	 */
	if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
		u_int32_t freq_inc, w;

		DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
			READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
		safestats.st_rngalarm++;
		safe_rng_enable_short_cycle(sc);
		freq_inc = 18;
		for (i = 0; i < 64; i++) {
			w = READ_REG(sc, SAFE_RNG_CNFG);
			freq_inc = ((w + freq_inc) & 0x3fL);
			w = ((w & ~0x3fL) | freq_inc);
			WRITE_REG(sc, SAFE_RNG_CNFG, w);

			WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

			(void) safe_rng_read(sc);
			DELAY(25);

			if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
				safe_rng_disable_short_cycle(sc);
				goto retry;
			}
			freq_inc = 1;
		}
		safe_rng_disable_short_cycle(sc);
	} else
		WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);

	(*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
	callout_reset(&sc->sc_rngto,
		hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
}
#endif /* SAFE_NO_RNG */

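/*
 * Trivial load callback used by safe_dma_malloc to capture the bus
 * address of a single-segment mapping.
 */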
static void
safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}

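/*
 * Allocate a DMA-able chunk of memory: create a single-segment tag
 * and map, allocate the memory, and load it to obtain the physical
 * address.  On failure all partially-created state is torn down.
 */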
static int
safe_dma_malloc(
	struct safe_softc *sc,
	bus_size_t size,
	struct safe_dma_alloc *dma,
	int mapflags
)
{
	int r;

	r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),	/* parent */
			       sizeof(u_int32_t), 0,	/* alignment, bounds */
			       BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
			       BUS_SPACE_MAXADDR,	/* highaddr */
			       NULL, NULL,		/* filter, filterarg */
			       size,			/* maxsize */
			       1,			/* nsegments */
			       size,			/* maxsegsize */
			       BUS_DMA_ALLOCNOW,	/* flags */
			       NULL, NULL,		/* locking */
			       &dma->dma_tag);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dma_tag_create failed; error %u\n", r);
		goto fail_0;
	}

	r = bus_dmamap_create(dma->dma_tag, BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_create failed; error %u\n", r);
		goto fail_1;
	}

	r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
			     BUS_DMA_NOWAIT, &dma->dma_map);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamem_alloc failed; size %zu, error %u\n",
			size, r);
		goto fail_2;
	}

	r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
			    size,
			    safe_dmamap_cb,
			    &dma->dma_paddr,
			    mapflags | BUS_DMA_NOWAIT);
	if (r != 0) {
		device_printf(sc->sc_dev, "safe_dma_malloc: "
			"bus_dmamap_load failed; error %u\n", r);
		goto fail_3;
	}

	dma->dma_size = size;
	return (0);

fail_3:
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
fail_2:
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
fail_1:
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
fail_0:
	dma->dma_map = NULL;
	dma->dma_tag = NULL;
	return (r);
}

static void
safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
{
	bus_dmamap_unload(dma->dma_tag, dma->dma_map);
	bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
	bus_dmamap_destroy(dma->dma_tag, dma->dma_map);
	bus_dma_tag_destroy(dma->dma_tag);
}

/*
 * Resets the board.  Values in the registers are left as is
 * from the reset (i.e. initial values are assigned elsewhere).
 */
static void
safe_reset_board(struct safe_softc *sc)
{
	u_int32_t v;
	/*
	 * Reset the device.  The manual says no delay
	 * is needed between marking and clearing reset.
	 */
	v = READ_REG(sc, SAFE_PE_DMACFG) &~
		(SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
		 SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v
				    | SAFE_PE_DMACFG_PERESET
				    | SAFE_PE_DMACFG_PDRRESET
				    | SAFE_PE_DMACFG_SGRESET);
	WRITE_REG(sc, SAFE_PE_DMACFG, v);
}
1883
1884 /*
1885 * Initialize registers we need to touch only once.
1886 */
1887 static void
1888 safe_init_board(struct safe_softc *sc)
1889 {
1890 u_int32_t v, dwords;
1891
1892 v = READ_REG(sc, SAFE_PE_DMACFG);
1893 v &=~ SAFE_PE_DMACFG_PEMODE;
1894 v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
1895 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
1896 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
1897 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
1898 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
1899 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
1900 ;
1901 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1902 #if 0
1903 /* XXX select byte swap based on host byte order */
1904 WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
1905 #endif
1906 if (sc->sc_chiprev == SAFE_REV(1,0)) {
1907 /*
1908 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
1909 * "target mode transfers" done while the chip is DMA'ing
1910 * >1020 bytes cause the hardware to lockup. To avoid this
1911 * we reduce the max PCI transfer size and use small source
1912 * particle descriptors (<= 256 bytes).
1913 */
1914 WRITE_REG(sc, SAFE_DMA_CFG, 256);
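/*
 * NB: the device_printf below reports the limit in 32-bit words;
 * with the 256 written above, (256 >> 2) & 0xff == 64 words.
 */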
1915 device_printf(sc->sc_dev,
1916 "Reduce max DMA size to %u words for rev %u.%u WAR\n",
1917 (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
1918 SAFE_REV_MAJ(sc->sc_chiprev),
1919 SAFE_REV_MIN(sc->sc_chiprev));
1920 }
1921
1922 /* NB: operands+results are overlaid */
1923 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
1924 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
1925 /*
1926 * Configure ring entry size and number of items in the ring.
1927 */
1928 KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
1929 ("PE ring entry not 32-bit aligned!"));
1930 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
1931 WRITE_REG(sc, SAFE_PE_RINGCFG,
1932 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
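/*
 * Worked example (hypothetical entry size): if a ring entry occupied
 * 64 bytes, dwords would be 64/4 == 16 and the value written would be
 * (16 << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE -- the entry size
 * in 32-bit words in the offset field, the ring depth in the low bits.
 */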
1933 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
1934
1935 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
1936 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
1937 WRITE_REG(sc, SAFE_PE_PARTSIZE,
1938 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
1939 /*
1940 * NB: destination particles are fixed size. We use
1941 * an mbuf cluster and require all results go to
1942 * clusters or smaller.
1943 */
1944 WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);
1945
1946 /* it's now safe to enable PE mode, do it */
1947 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
1948
1949 /*
1950 * Configure hardware to use level-triggered interrupts and
1951 * to interrupt after each descriptor is processed.
1952 */
1953 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
1954 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
1955 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
1956 }
1957
1958 /*
1959 * Init PCI registers
1960 */
1961 static void
1962 safe_init_pciregs(device_t dev)
1963 {
1964 }
1965
1966 /*
1967 * Clean up after a chip crash.
1968 * It is assumed that the caller is in splimp().
1969 */
1970 static void
1971 safe_cleanchip(struct safe_softc *sc)
1972 {
1973
1974 if (sc->sc_nqchip != 0) {
1975 struct safe_ringentry *re = sc->sc_back;
1976
1977 while (re != sc->sc_front) {
1978 if (re->re_desc.d_csr != 0)
1979 safe_free_entry(sc, re);
1980 if (++re == sc->sc_ringtop)
1981 re = sc->sc_ring;
1982 }
1983 sc->sc_back = re;
1984 sc->sc_nqchip = 0;
1985 }
1986 }
1987
1988 /*
1989 * free a safe_q
1990 * It is assumed that the caller is within splimp().
1991 */
1992 static int
1993 safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
1994 {
1995 struct cryptop *crp;
1996
1997 /*
1998 * Free the destination mbuf if it differs from the source.
1999 */
2000 if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
2001 m_freem(re->re_dst_m);
2002
2003 crp = (struct cryptop *)re->re_crp;
2004
2005 re->re_desc.d_csr = 0;
2006
2007 crp->crp_etype = EFAULT;
2008 crypto_done(crp);
2009 return(0);
2010 }
2011
2012 /*
2013 * Routine to reset the chip and clean up.
2014 * It is assumed that the caller is in splimp().
2015 */
2016 static void
2017 safe_totalreset(struct safe_softc *sc)
2018 {
2019 safe_reset_board(sc);
2020 safe_init_board(sc);
2021 safe_cleanchip(sc);
2022 }
2023
2024 /*
2025 * Is the operand suitably aligned for direct DMA? Each
2026 * segment must start on a 32-bit boundary, and every segment
2027 * but the last must have a length that is a multiple of 4 bytes.
2028 */
2029 static int
2030 safe_dmamap_aligned(const struct safe_operand *op)
2031 {
2032 int i;
2033
2034 for (i = 0; i < op->nsegs; i++) {
2035 if (op->segs[i].ds_addr & 3)
2036 return (0);
2037 if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
2038 return (0);
2039 }
2040 return (1);
2041 }
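/*
 * Illustrative check (not from the driver): two segments at 4-byte
 * aligned addresses with lengths 64 and 13 pass -- only the final
 * segment may have a length that is not a multiple of 4. Any segment
 * at an odd address, or a 6-byte first segment, would fail.
 */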
2042
2043 /*
2044 * Is the operand suitable for direct DMA as the destination
2045 * of an operation. The hardware requires that each ``particle''
2046 * but the last in an operation result have the same size. We
2047 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
2048 * 0 if some segment is not a multiple of this size, 1 if all
2049 * segments are exactly this size, or 2 if segments are at worst
2050 * a multiple of this size.
2051 */
2052 static int
2053 safe_dmamap_uniform(const struct safe_operand *op)
2054 {
2055 int result = 1;
2056
2057 if (op->nsegs > 0) {
2058 int i;
2059
2060 for (i = 0; i < op->nsegs-1; i++) {
2061 if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
2062 return (0);
2063 if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
2064 result = 2;
2065 }
2066 }
2067 return (result);
2068 }
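/*
 * Illustrative results, assuming SAFE_MAX_DSIZE is the 2KB mbuf
 * cluster size suggested by the destination-particle comment above:
 * segments of 2048/2048/100 bytes return 1 (uniform), 4096/2048/100
 * return 2 (multiples but not exact), and 1500/2048/100 return 0.
 */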
2069
2070 #ifdef SAFE_DEBUG
2071 static void
2072 safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
2073 {
2074 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
2075 , tag
2076 , READ_REG(sc, SAFE_DMA_ENDIAN)
2077 , READ_REG(sc, SAFE_DMA_SRCADDR)
2078 , READ_REG(sc, SAFE_DMA_DSTADDR)
2079 , READ_REG(sc, SAFE_DMA_STAT)
2080 );
2081 }
2082
2083 static void
2084 safe_dump_intrstate(struct safe_softc *sc, const char *tag)
2085 {
2086 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
2087 , tag
2088 , READ_REG(sc, SAFE_HI_CFG)
2089 , READ_REG(sc, SAFE_HI_MASK)
2090 , READ_REG(sc, SAFE_HI_DESC_CNT)
2091 , READ_REG(sc, SAFE_HU_STAT)
2092 , READ_REG(sc, SAFE_HM_STAT)
2093 );
2094 }
2095
2096 static void
2097 safe_dump_ringstate(struct safe_softc *sc, const char *tag)
2098 {
2099 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
2100
2101 /* NB: assume caller has lock on ring */
2102 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
2103 tag,
2104 estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
2105 (unsigned long)(sc->sc_back - sc->sc_ring),
2106 (unsigned long)(sc->sc_front - sc->sc_ring));
2107 }
2108
2109 static void
2110 safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
2111 {
2112 int ix, nsegs;
2113
2114 ix = re - sc->sc_ring;
2115 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
2116 , tag
2117 , re, ix
2118 , re->re_desc.d_csr
2119 , re->re_desc.d_src
2120 , re->re_desc.d_dst
2121 , re->re_desc.d_sa
2122 , re->re_desc.d_len
2123 );
2124 if (re->re_src.nsegs > 1) {
2125 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
2126 sizeof(struct safe_pdesc);
2127 for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
2128 printf(" spd[%u] %p: %p size %u flags %x"
2129 , ix, &sc->sc_spring[ix]
2130 , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
2131 , sc->sc_spring[ix].pd_size
2132 , sc->sc_spring[ix].pd_flags
2133 );
2134 if (sc->sc_spring[ix].pd_size == 0)
2135 printf(" (zero!)");
2136 printf("\n");
2137 if (++ix == SAFE_TOTAL_SPART)
2138 ix = 0;
2139 }
2140 }
2141 if (re->re_dst.nsegs > 1) {
2142 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
2143 sizeof(struct safe_pdesc);
2144 for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
2145 printf(" dpd[%u] %p: %p flags %x\n"
2146 , ix, &sc->sc_dpring[ix]
2147 , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
2148 , sc->sc_dpring[ix].pd_flags
2149 );
2150 if (++ix == SAFE_TOTAL_DPART)
2151 ix = 0;
2152 }
2153 }
2154 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
2155 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
2156 printf("sa: key %x %x %x %x %x %x %x %x\n"
2157 , re->re_sa.sa_key[0]
2158 , re->re_sa.sa_key[1]
2159 , re->re_sa.sa_key[2]
2160 , re->re_sa.sa_key[3]
2161 , re->re_sa.sa_key[4]
2162 , re->re_sa.sa_key[5]
2163 , re->re_sa.sa_key[6]
2164 , re->re_sa.sa_key[7]
2165 );
2166 printf("sa: indigest %x %x %x %x %x\n"
2167 , re->re_sa.sa_indigest[0]
2168 , re->re_sa.sa_indigest[1]
2169 , re->re_sa.sa_indigest[2]
2170 , re->re_sa.sa_indigest[3]
2171 , re->re_sa.sa_indigest[4]
2172 );
2173 printf("sa: outdigest %x %x %x %x %x\n"
2174 , re->re_sa.sa_outdigest[0]
2175 , re->re_sa.sa_outdigest[1]
2176 , re->re_sa.sa_outdigest[2]
2177 , re->re_sa.sa_outdigest[3]
2178 , re->re_sa.sa_outdigest[4]
2179 );
2180 printf("sr: iv %x %x %x %x\n"
2181 , re->re_sastate.sa_saved_iv[0]
2182 , re->re_sastate.sa_saved_iv[1]
2183 , re->re_sastate.sa_saved_iv[2]
2184 , re->re_sastate.sa_saved_iv[3]
2185 );
2186 printf("sr: hashbc %u indigest %x %x %x %x %x\n"
2187 , re->re_sastate.sa_saved_hashbc
2188 , re->re_sastate.sa_saved_indigest[0]
2189 , re->re_sastate.sa_saved_indigest[1]
2190 , re->re_sastate.sa_saved_indigest[2]
2191 , re->re_sastate.sa_saved_indigest[3]
2192 , re->re_sastate.sa_saved_indigest[4]
2193 );
2194 }
2195
2196 static void
2197 safe_dump_ring(struct safe_softc *sc, const char *tag)
2198 {
2199 mtx_lock(&sc->sc_ringmtx);
2200 printf("\nSafeNet Ring State:\n");
2201 safe_dump_intrstate(sc, tag);
2202 safe_dump_dmastatus(sc, tag);
2203 safe_dump_ringstate(sc, tag);
2204 if (sc->sc_nqchip) {
2205 struct safe_ringentry *re = sc->sc_back;
2206 do {
2207 safe_dump_request(sc, tag, re);
2208 if (++re == sc->sc_ringtop)
2209 re = sc->sc_ring;
2210 } while (re != sc->sc_front);
2211 }
2212 mtx_unlock(&sc->sc_ringmtx);
2213 }
2214
2215 static int
2216 sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
2217 {
2218 char dmode[64];
2219 int error;
2220
2221 strncpy(dmode, "", sizeof(dmode) - 1);
2222 dmode[sizeof(dmode) - 1] = '\0';
2223 error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);
2224
2225 if (error == 0 && req->newptr != NULL) {
2226 struct safe_softc *sc = safec;
2227
2228 if (!sc)
2229 return EINVAL;
2230 if (strncmp(dmode, "dma", 3) == 0)
2231 safe_dump_dmastatus(sc, "safe0");
2232 else if (strncmp(dmode, "int", 3) == 0)
2233 safe_dump_intrstate(sc, "safe0");
2234 else if (strncmp(dmode, "ring", 4) == 0)
2235 safe_dump_ring(sc, "safe0");
2236 else
2237 return EINVAL;
2238 }
2239 return error;
2240 }
2241 SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW,
2242 0, 0, sysctl_hw_safe_dump, "A", "Dump driver state");
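/*
 * Illustrative usage (assuming the device attached as safe0):
 *
 *	sysctl hw.safe.dump=ring
 *
 * dumps interrupt, DMA, and ring state to the console; "dma" and
 * "int" select the narrower dumps handled above.
 */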
2243 #endif /* SAFE_DEBUG */