sys/dev/safe/safe.c
1 /*-
2 * Copyright (c) 2003 Sam Leffler, Errno Consulting
3 * Copyright (c) 2003 Global Technology Associates, Inc.
4 * All rights reserved.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
8 * are met:
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
14 *
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25 * SUCH DAMAGE.
26 */
27
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD: releng/11.2/sys/dev/safe/safe.c 331722 2018-03-29 02:50:57Z eadler $");
30
31 /*
32 * SafeNet SafeXcel-1141 hardware crypto accelerator
33 */
34 #include "opt_safe.h"
35
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/proc.h>
39 #include <sys/errno.h>
40 #include <sys/malloc.h>
41 #include <sys/kernel.h>
42 #include <sys/mbuf.h>
43 #include <sys/module.h>
44 #include <sys/lock.h>
45 #include <sys/mutex.h>
46 #include <sys/sysctl.h>
47 #include <sys/endian.h>
48
49 #include <vm/vm.h>
50 #include <vm/pmap.h>
51
52 #include <machine/bus.h>
53 #include <machine/resource.h>
54 #include <sys/bus.h>
55 #include <sys/rman.h>
56
57 #include <crypto/sha1.h>
58 #include <opencrypto/cryptodev.h>
59 #include <opencrypto/cryptosoft.h>
60 #include <sys/md5.h>
61 #include <sys/random.h>
62 #include <sys/kobj.h>
63
64 #include "cryptodev_if.h"
65
66 #include <dev/pci/pcivar.h>
67 #include <dev/pci/pcireg.h>
68
69 #ifdef SAFE_RNDTEST
70 #include <dev/rndtest/rndtest.h>
71 #endif
72 #include <dev/safe/safereg.h>
73 #include <dev/safe/safevar.h>
74
75 #ifndef bswap32
76 #define bswap32 NTOHL
77 #endif
78
79 /*
80 * Prototypes and count for the pci_device structure
81 */
82 static int safe_probe(device_t);
83 static int safe_attach(device_t);
84 static int safe_detach(device_t);
85 static int safe_suspend(device_t);
86 static int safe_resume(device_t);
87 static int safe_shutdown(device_t);
88
89 static int safe_newsession(device_t, u_int32_t *, struct cryptoini *);
90 static int safe_freesession(device_t, u_int64_t);
91 static int safe_process(device_t, struct cryptop *, int);
92
93 static device_method_t safe_methods[] = {
94 /* Device interface */
95 DEVMETHOD(device_probe, safe_probe),
96 DEVMETHOD(device_attach, safe_attach),
97 DEVMETHOD(device_detach, safe_detach),
98 DEVMETHOD(device_suspend, safe_suspend),
99 DEVMETHOD(device_resume, safe_resume),
100 DEVMETHOD(device_shutdown, safe_shutdown),
101
102 /* crypto device methods */
103 DEVMETHOD(cryptodev_newsession, safe_newsession),
104 DEVMETHOD(cryptodev_freesession,safe_freesession),
105 DEVMETHOD(cryptodev_process, safe_process),
106
107 DEVMETHOD_END
108 };
109 static driver_t safe_driver = {
110 "safe",
111 safe_methods,
112 sizeof (struct safe_softc)
113 };
114 static devclass_t safe_devclass;
115
116 DRIVER_MODULE(safe, pci, safe_driver, safe_devclass, 0, 0);
117 MODULE_DEPEND(safe, crypto, 1, 1, 1);
118 #ifdef SAFE_RNDTEST
119 MODULE_DEPEND(safe, rndtest, 1, 1, 1);
120 #endif
121
122 static void safe_intr(void *);
123 static void safe_callback(struct safe_softc *, struct safe_ringentry *);
124 static void safe_feed(struct safe_softc *, struct safe_ringentry *);
125 static void safe_mcopy(struct mbuf *, struct mbuf *, u_int);
126 #ifndef SAFE_NO_RNG
127 static void safe_rng_init(struct safe_softc *);
128 static void safe_rng(void *);
129 #endif /* SAFE_NO_RNG */
130 static int safe_dma_malloc(struct safe_softc *, bus_size_t,
131 struct safe_dma_alloc *, int);
132 #define safe_dma_sync(_dma, _flags) \
133 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
134 static void safe_dma_free(struct safe_softc *, struct safe_dma_alloc *);
135 static int safe_dmamap_aligned(const struct safe_operand *);
136 static int safe_dmamap_uniform(const struct safe_operand *);
137
138 static void safe_reset_board(struct safe_softc *);
139 static void safe_init_board(struct safe_softc *);
140 static void safe_init_pciregs(device_t dev);
141 static void safe_cleanchip(struct safe_softc *);
142 static void safe_totalreset(struct safe_softc *);
143
144 static int safe_free_entry(struct safe_softc *, struct safe_ringentry *);
145
146 static SYSCTL_NODE(_hw, OID_AUTO, safe, CTLFLAG_RD, 0,
147 "SafeNet driver parameters");
148
149 #ifdef SAFE_DEBUG
150 static void safe_dump_dmastatus(struct safe_softc *, const char *);
151 static void safe_dump_ringstate(struct safe_softc *, const char *);
152 static void safe_dump_intrstate(struct safe_softc *, const char *);
153 static void safe_dump_request(struct safe_softc *, const char *,
154 struct safe_ringentry *);
155
156 static struct safe_softc *safec; /* for use by hw.safe.dump */
157
158 static int safe_debug = 0;
159 SYSCTL_INT(_hw_safe, OID_AUTO, debug, CTLFLAG_RW, &safe_debug,
160 0, "control debugging msgs");
161 #define DPRINTF(_x) if (safe_debug) printf _x
162 #else
163 #define DPRINTF(_x)
164 #endif
165
166 #define READ_REG(sc,r) \
167 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
168
169 #define WRITE_REG(sc,reg,val) \
170 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)
171
172 struct safe_stats safestats;
173 SYSCTL_STRUCT(_hw_safe, OID_AUTO, stats, CTLFLAG_RD, &safestats,
174 safe_stats, "driver statistics");
175 #ifndef SAFE_NO_RNG
176 static int safe_rnginterval = 1; /* poll once a second */
177 SYSCTL_INT(_hw_safe, OID_AUTO, rnginterval, CTLFLAG_RW, &safe_rnginterval,
178 0, "RNG polling interval (secs)");
179 static int safe_rngbufsize = 16; /* 64 bytes each poll */
180 SYSCTL_INT(_hw_safe, OID_AUTO, rngbufsize, CTLFLAG_RW, &safe_rngbufsize,
181 0, "RNG polling buffer size (32-bit words)");
182 static int safe_rngmaxalarm = 8; /* max alarms before reset */
183 SYSCTL_INT(_hw_safe, OID_AUTO, rngmaxalarm, CTLFLAG_RW, &safe_rngmaxalarm,
184 0, "RNG max alarms before reset");
185 #endif /* SAFE_NO_RNG */
186
187 static int
188 safe_probe(device_t dev)
189 {
190 if (pci_get_vendor(dev) == PCI_VENDOR_SAFENET &&
191 pci_get_device(dev) == PCI_PRODUCT_SAFEXCEL)
192 return (BUS_PROBE_DEFAULT);
193 return (ENXIO);
194 }
195
196 static const char*
197 safe_partname(struct safe_softc *sc)
198 {
199 /* XXX sprintf numbers when not decoded */
200 switch (pci_get_vendor(sc->sc_dev)) {
201 case PCI_VENDOR_SAFENET:
202 switch (pci_get_device(sc->sc_dev)) {
203 case PCI_PRODUCT_SAFEXCEL: return "SafeNet SafeXcel-1141";
204 }
205 return "SafeNet unknown-part";
206 }
207 return "Unknown-vendor unknown-part";
208 }
209
210 #ifndef SAFE_NO_RNG
211 static void
212 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
213 {
214 /* MarkM: FIX!! Check that this does not swamp the harvester! */
215 random_harvest_queue(buf, count, count*NBBY/2, RANDOM_PURE_SAFE);
216 }
217 #endif /* SAFE_NO_RNG */
218
219 static int
220 safe_attach(device_t dev)
221 {
222 struct safe_softc *sc = device_get_softc(dev);
223 u_int32_t raddr;
224 u_int32_t i, devinfo;
225 int rid;
226
227 bzero(sc, sizeof (*sc));
228 sc->sc_dev = dev;
229
230 /* XXX handle power management */
231
232 pci_enable_busmaster(dev);
233
234 /*
235 * Setup memory-mapping of PCI registers.
236 */
237 rid = BS_BAR;
238 sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
239 RF_ACTIVE);
240 if (sc->sc_sr == NULL) {
241 device_printf(dev, "cannot map register space\n");
242 goto bad;
243 }
244 sc->sc_st = rman_get_bustag(sc->sc_sr);
245 sc->sc_sh = rman_get_bushandle(sc->sc_sr);
246
247 /*
248 * Arrange interrupt line.
249 */
250 rid = 0;
251 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
252 RF_SHAREABLE|RF_ACTIVE);
253 if (sc->sc_irq == NULL) {
254 device_printf(dev, "could not map interrupt\n");
255 goto bad1;
256 }
257 /*
258 * NB: Network code assumes we are blocked with splimp()
259 * so make sure the IRQ is mapped appropriately.
260 */
261 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
262 NULL, safe_intr, sc, &sc->sc_ih)) {
263 device_printf(dev, "could not establish interrupt\n");
264 goto bad2;
265 }
266
267 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
268 if (sc->sc_cid < 0) {
269 device_printf(dev, "could not get crypto driver id\n");
270 goto bad3;
271 }
272
273 sc->sc_chiprev = READ_REG(sc, SAFE_DEVINFO) &
274 (SAFE_DEVINFO_REV_MAJ | SAFE_DEVINFO_REV_MIN);
275
276 /*
277 * Setup DMA descriptor area.
278 */
279 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
280 1, /* alignment */
281 SAFE_DMA_BOUNDARY, /* boundary */
282 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
283 BUS_SPACE_MAXADDR, /* highaddr */
284 NULL, NULL, /* filter, filterarg */
285 SAFE_MAX_DMA, /* maxsize */
286 SAFE_MAX_PART, /* nsegments */
287 SAFE_MAX_SSIZE, /* maxsegsize */
288 BUS_DMA_ALLOCNOW, /* flags */
289 NULL, NULL, /* locking */
290 &sc->sc_srcdmat)) {
291 device_printf(dev, "cannot allocate DMA tag\n");
292 goto bad4;
293 }
294 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
295 1, /* alignment */
296 SAFE_MAX_DSIZE, /* boundary */
297 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
298 BUS_SPACE_MAXADDR, /* highaddr */
299 NULL, NULL, /* filter, filterarg */
300 SAFE_MAX_DMA, /* maxsize */
301 SAFE_MAX_PART, /* nsegments */
302 SAFE_MAX_DSIZE, /* maxsegsize */
303 BUS_DMA_ALLOCNOW, /* flags */
304 NULL, NULL, /* locking */
305 &sc->sc_dstdmat)) {
306 device_printf(dev, "cannot allocate DMA tag\n");
307 goto bad4;
308 }
309
310 /*
311 * Allocate packet engine descriptors.
312 */
313 if (safe_dma_malloc(sc,
314 SAFE_MAX_NQUEUE * sizeof (struct safe_ringentry),
315 &sc->sc_ringalloc, 0)) {
316 device_printf(dev, "cannot allocate PE descriptor ring\n");
317 bus_dma_tag_destroy(sc->sc_srcdmat);
318 goto bad4;
319 }
320 /*
321 * Hookup the static portion of all our data structures.
322 */
323 sc->sc_ring = (struct safe_ringentry *) sc->sc_ringalloc.dma_vaddr;
324 sc->sc_ringtop = sc->sc_ring + SAFE_MAX_NQUEUE;
325 sc->sc_front = sc->sc_ring;
326 sc->sc_back = sc->sc_ring;
327 raddr = sc->sc_ringalloc.dma_paddr;
328 bzero(sc->sc_ring, SAFE_MAX_NQUEUE * sizeof(struct safe_ringentry));
329 for (i = 0; i < SAFE_MAX_NQUEUE; i++) {
330 struct safe_ringentry *re = &sc->sc_ring[i];
331
332 re->re_desc.d_sa = raddr +
333 offsetof(struct safe_ringentry, re_sa);
334 re->re_sa.sa_staterec = raddr +
335 offsetof(struct safe_ringentry, re_sastate);
336
337 raddr += sizeof (struct safe_ringentry);
338 }
339 mtx_init(&sc->sc_ringmtx, device_get_nameunit(dev),
340 "packet engine ring", MTX_DEF);
341
342 /*
343 * Allocate scatter and gather particle descriptors.
344 */
345 if (safe_dma_malloc(sc, SAFE_TOTAL_SPART * sizeof (struct safe_pdesc),
346 &sc->sc_spalloc, 0)) {
347 device_printf(dev, "cannot allocate source particle "
348 "descriptor ring\n");
349 mtx_destroy(&sc->sc_ringmtx);
350 safe_dma_free(sc, &sc->sc_ringalloc);
351 bus_dma_tag_destroy(sc->sc_srcdmat);
352 goto bad4;
353 }
354 sc->sc_spring = (struct safe_pdesc *) sc->sc_spalloc.dma_vaddr;
355 sc->sc_springtop = sc->sc_spring + SAFE_TOTAL_SPART;
356 sc->sc_spfree = sc->sc_spring;
357 bzero(sc->sc_spring, SAFE_TOTAL_SPART * sizeof(struct safe_pdesc));
358
359 if (safe_dma_malloc(sc, SAFE_TOTAL_DPART * sizeof (struct safe_pdesc),
360 &sc->sc_dpalloc, 0)) {
361 device_printf(dev, "cannot allocate destination particle "
362 "descriptor ring\n");
363 mtx_destroy(&sc->sc_ringmtx);
364 safe_dma_free(sc, &sc->sc_spalloc);
365 safe_dma_free(sc, &sc->sc_ringalloc);
366 bus_dma_tag_destroy(sc->sc_dstdmat);
367 goto bad4;
368 }
369 sc->sc_dpring = (struct safe_pdesc *) sc->sc_dpalloc.dma_vaddr;
370 sc->sc_dpringtop = sc->sc_dpring + SAFE_TOTAL_DPART;
371 sc->sc_dpfree = sc->sc_dpring;
372 bzero(sc->sc_dpring, SAFE_TOTAL_DPART * sizeof(struct safe_pdesc));
373
374 device_printf(sc->sc_dev, "%s", safe_partname(sc));
375
376 devinfo = READ_REG(sc, SAFE_DEVINFO);
377 if (devinfo & SAFE_DEVINFO_RNG) {
378 sc->sc_flags |= SAFE_FLAGS_RNG;
379 printf(" rng");
380 }
381 if (devinfo & SAFE_DEVINFO_PKEY) {
382 #if 0
383 printf(" key");
384 sc->sc_flags |= SAFE_FLAGS_KEY;
385 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
386 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
387 #endif
388 }
389 if (devinfo & SAFE_DEVINFO_DES) {
390 printf(" des/3des");
391 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
392 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
393 }
394 if (devinfo & SAFE_DEVINFO_AES) {
395 printf(" aes");
396 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
397 }
398 if (devinfo & SAFE_DEVINFO_MD5) {
399 printf(" md5");
400 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
401 }
402 if (devinfo & SAFE_DEVINFO_SHA1) {
403 printf(" sha1");
404 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
405 }
406 printf(" null");
407 crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
408 crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
409 /* XXX other supported algorithms */
410 printf("\n");
411
412 safe_reset_board(sc); /* reset h/w */
413 safe_init_pciregs(dev); /* init pci settings */
414 safe_init_board(sc); /* init h/w */
415
416 #ifndef SAFE_NO_RNG
417 if (sc->sc_flags & SAFE_FLAGS_RNG) {
418 #ifdef SAFE_RNDTEST
419 sc->sc_rndtest = rndtest_attach(dev);
420 if (sc->sc_rndtest)
421 sc->sc_harvest = rndtest_harvest;
422 else
423 sc->sc_harvest = default_harvest;
424 #else
425 sc->sc_harvest = default_harvest;
426 #endif
427 safe_rng_init(sc);
428
429 callout_init(&sc->sc_rngto, 1);
430 callout_reset(&sc->sc_rngto, hz*safe_rnginterval, safe_rng, sc);
431 }
432 #endif /* SAFE_NO_RNG */
433 #ifdef SAFE_DEBUG
434 safec = sc; /* for use by hw.safe.dump */
435 #endif
436 return (0);
437 bad4:
438 crypto_unregister_all(sc->sc_cid);
439 bad3:
440 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
441 bad2:
442 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
443 bad1:
444 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
445 bad:
446 return (ENXIO);
447 }
448
449 /*
450 * Detach a device that successfully probed.
451 */
452 static int
453 safe_detach(device_t dev)
454 {
455 struct safe_softc *sc = device_get_softc(dev);
456
457 /* XXX wait/abort active ops */
458
459 WRITE_REG(sc, SAFE_HI_MASK, 0); /* disable interrupts */
460
461 callout_stop(&sc->sc_rngto);
462
463 crypto_unregister_all(sc->sc_cid);
464
465 #ifdef SAFE_RNDTEST
466 if (sc->sc_rndtest)
467 rndtest_detach(sc->sc_rndtest);
468 #endif
469
470 safe_cleanchip(sc);
471 safe_dma_free(sc, &sc->sc_dpalloc);
472 safe_dma_free(sc, &sc->sc_spalloc);
473 mtx_destroy(&sc->sc_ringmtx);
474 safe_dma_free(sc, &sc->sc_ringalloc);
475
476 bus_generic_detach(dev);
477 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
478 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
479
480 bus_dma_tag_destroy(sc->sc_srcdmat);
481 bus_dma_tag_destroy(sc->sc_dstdmat);
482 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
483
484 return (0);
485 }
486
487 /*
488 * Stop all chip i/o so that the kernel's probe routines don't
489 * get confused by errant DMAs when rebooting.
490 */
491 static int
492 safe_shutdown(device_t dev)
493 {
494 #ifdef notyet
495 safe_stop(device_get_softc(dev));
496 #endif
497 return (0);
498 }
499
500 /*
501 * Device suspend routine.
502 */
503 static int
504 safe_suspend(device_t dev)
505 {
506 struct safe_softc *sc = device_get_softc(dev);
507
508 #ifdef notyet
509 /* XXX stop the device and save PCI settings */
510 #endif
511 sc->sc_suspended = 1;
512
513 return (0);
514 }
515
516 static int
517 safe_resume(device_t dev)
518 {
519 struct safe_softc *sc = device_get_softc(dev);
520
521 #ifdef notyet
 522 	/* XXX restore PCI settings and start the device */
523 #endif
524 sc->sc_suspended = 0;
525 return (0);
526 }
527
528 /*
529 * SafeXcel Interrupt routine
530 */
531 static void
532 safe_intr(void *arg)
533 {
534 struct safe_softc *sc = arg;
535 volatile u_int32_t stat;
536
537 stat = READ_REG(sc, SAFE_HM_STAT);
538 if (stat == 0) /* shared irq, not for us */
539 return;
540
541 WRITE_REG(sc, SAFE_HI_CLR, stat); /* IACK */
542
543 if ((stat & SAFE_INT_PE_DDONE)) {
544 /*
545 * Descriptor(s) done; scan the ring and
546 * process completed operations.
547 */
548 mtx_lock(&sc->sc_ringmtx);
549 while (sc->sc_back != sc->sc_front) {
550 struct safe_ringentry *re = sc->sc_back;
551 #ifdef SAFE_DEBUG
552 if (safe_debug) {
553 safe_dump_ringstate(sc, __func__);
554 safe_dump_request(sc, __func__, re);
555 }
556 #endif
557 /*
558 * safe_process marks ring entries that were allocated
 559 		 * but not used with a csr of zero.  This ensures the
560 * ring front pointer never needs to be set backwards
561 * in the event that an entry is allocated but not used
562 * because of a setup error.
563 */
564 if (re->re_desc.d_csr != 0) {
565 if (!SAFE_PE_CSR_IS_DONE(re->re_desc.d_csr))
566 break;
567 if (!SAFE_PE_LEN_IS_DONE(re->re_desc.d_len))
568 break;
569 sc->sc_nqchip--;
570 safe_callback(sc, re);
571 }
572 if (++(sc->sc_back) == sc->sc_ringtop)
573 sc->sc_back = sc->sc_ring;
574 }
575 mtx_unlock(&sc->sc_ringmtx);
576 }
577
578 /*
579 * Check to see if we got any DMA Error
580 */
581 if (stat & SAFE_INT_PE_ERROR) {
582 DPRINTF(("dmaerr dmastat %08x\n",
583 READ_REG(sc, SAFE_PE_DMASTAT)));
584 safestats.st_dmaerr++;
585 safe_totalreset(sc);
586 #if 0
587 safe_feed(sc);
588 #endif
589 }
590
591 if (sc->sc_needwakeup) { /* XXX check high watermark */
592 int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
593 DPRINTF(("%s: wakeup crypto %x\n", __func__,
594 sc->sc_needwakeup));
595 sc->sc_needwakeup &= ~wakeup;
596 crypto_unblock(sc->sc_cid, wakeup);
597 }
598 }
599
600 /*
601 * safe_feed() - post a request to chip
602 */
603 static void
604 safe_feed(struct safe_softc *sc, struct safe_ringentry *re)
605 {
606 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_PREWRITE);
607 if (re->re_dst_map != NULL)
608 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
609 BUS_DMASYNC_PREREAD);
610 /* XXX have no smaller granularity */
611 safe_dma_sync(&sc->sc_ringalloc,
612 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
613 safe_dma_sync(&sc->sc_spalloc, BUS_DMASYNC_PREWRITE);
614 safe_dma_sync(&sc->sc_dpalloc, BUS_DMASYNC_PREWRITE);
615
616 #ifdef SAFE_DEBUG
617 if (safe_debug) {
618 safe_dump_ringstate(sc, __func__);
619 safe_dump_request(sc, __func__, re);
620 }
621 #endif
622 sc->sc_nqchip++;
623 if (sc->sc_nqchip > safestats.st_maxqchip)
624 safestats.st_maxqchip = sc->sc_nqchip;
625 /* poke h/w to check descriptor ring, any value can be written */
626 WRITE_REG(sc, SAFE_HI_RD_DESCR, 0);
627 }
628
629 #define N(a) (sizeof(a) / sizeof (a[0]))
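/*
 * Copy the cipher key into the session state and byte-swap it to the
 * little-endian form expected by the packet engine.
 */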
630 static void
631 safe_setup_enckey(struct safe_session *ses, caddr_t key)
632 {
633 int i;
634
635 bcopy(key, ses->ses_key, ses->ses_klen / 8);
636
 637 	/* PE is little-endian, ensure proper byte order */
638 for (i = 0; i < N(ses->ses_key); i++)
639 ses->ses_key[i] = htole32(ses->ses_key[i]);
640 }
641
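/*
 * Precompute the HMAC inner and outer hash state for a MAC key.  The key
 * is XOR'd with the ipad/opad values and run through a single MD5/SHA-1
 * block; the resulting intermediate digests are stored (little-endian)
 * in the session for later loading into the SA.
 */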
642 static void
643 safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
644 {
645 MD5_CTX md5ctx;
646 SHA1_CTX sha1ctx;
647 int i;
648
649
650 for (i = 0; i < klen; i++)
651 key[i] ^= HMAC_IPAD_VAL;
652
653 if (algo == CRYPTO_MD5_HMAC) {
654 MD5Init(&md5ctx);
655 MD5Update(&md5ctx, key, klen);
656 MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
657 bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));
658 } else {
659 SHA1Init(&sha1ctx);
660 SHA1Update(&sha1ctx, key, klen);
661 SHA1Update(&sha1ctx, hmac_ipad_buffer,
662 SHA1_HMAC_BLOCK_LEN - klen);
663 bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
664 }
665
666 for (i = 0; i < klen; i++)
667 key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
668
669 if (algo == CRYPTO_MD5_HMAC) {
670 MD5Init(&md5ctx);
671 MD5Update(&md5ctx, key, klen);
672 MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
673 bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));
674 } else {
675 SHA1Init(&sha1ctx);
676 SHA1Update(&sha1ctx, key, klen);
677 SHA1Update(&sha1ctx, hmac_opad_buffer,
678 SHA1_HMAC_BLOCK_LEN - klen);
679 bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
680 }
681
682 for (i = 0; i < klen; i++)
683 key[i] ^= HMAC_OPAD_VAL;
684
 685 	/* PE is little-endian, ensure proper byte order */
686 for (i = 0; i < N(ses->ses_hminner); i++) {
687 ses->ses_hminner[i] = htole32(ses->ses_hminner[i]);
688 ses->ses_hmouter[i] = htole32(ses->ses_hmouter[i]);
689 }
690 }
691 #undef N
692
693 /*
694 * Allocate a new 'session' and return an encoded session id. 'sidp'
695 * contains our registration id, and should contain an encoded session
696 * id on successful allocation.
697 */
698 static int
699 safe_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
700 {
701 struct safe_softc *sc = device_get_softc(dev);
702 struct cryptoini *c, *encini = NULL, *macini = NULL;
703 struct safe_session *ses = NULL;
704 int sesn;
705
706 if (sidp == NULL || cri == NULL || sc == NULL)
707 return (EINVAL);
708
709 for (c = cri; c != NULL; c = c->cri_next) {
710 if (c->cri_alg == CRYPTO_MD5_HMAC ||
711 c->cri_alg == CRYPTO_SHA1_HMAC ||
712 c->cri_alg == CRYPTO_NULL_HMAC) {
713 if (macini)
714 return (EINVAL);
715 macini = c;
716 } else if (c->cri_alg == CRYPTO_DES_CBC ||
717 c->cri_alg == CRYPTO_3DES_CBC ||
718 c->cri_alg == CRYPTO_AES_CBC ||
719 c->cri_alg == CRYPTO_NULL_CBC) {
720 if (encini)
721 return (EINVAL);
722 encini = c;
723 } else
724 return (EINVAL);
725 }
726 if (encini == NULL && macini == NULL)
727 return (EINVAL);
728 if (encini) { /* validate key length */
729 switch (encini->cri_alg) {
730 case CRYPTO_DES_CBC:
731 if (encini->cri_klen != 64)
732 return (EINVAL);
733 break;
734 case CRYPTO_3DES_CBC:
735 if (encini->cri_klen != 192)
736 return (EINVAL);
737 break;
738 case CRYPTO_AES_CBC:
739 if (encini->cri_klen != 128 &&
740 encini->cri_klen != 192 &&
741 encini->cri_klen != 256)
742 return (EINVAL);
743 break;
744 }
745 }
746
747 if (sc->sc_sessions == NULL) {
748 ses = sc->sc_sessions = (struct safe_session *)malloc(
749 sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
750 if (ses == NULL)
751 return (ENOMEM);
752 sesn = 0;
753 sc->sc_nsessions = 1;
754 } else {
755 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
756 if (sc->sc_sessions[sesn].ses_used == 0) {
757 ses = &sc->sc_sessions[sesn];
758 break;
759 }
760 }
761
762 if (ses == NULL) {
763 sesn = sc->sc_nsessions;
764 ses = (struct safe_session *)malloc((sesn + 1) *
765 sizeof(struct safe_session), M_DEVBUF, M_NOWAIT);
766 if (ses == NULL)
767 return (ENOMEM);
768 bcopy(sc->sc_sessions, ses, sesn *
769 sizeof(struct safe_session));
770 bzero(sc->sc_sessions, sesn *
771 sizeof(struct safe_session));
772 free(sc->sc_sessions, M_DEVBUF);
773 sc->sc_sessions = ses;
774 ses = &sc->sc_sessions[sesn];
775 sc->sc_nsessions++;
776 }
777 }
778
779 bzero(ses, sizeof(struct safe_session));
780 ses->ses_used = 1;
781
782 if (encini) {
783 /* get an IV */
784 /* XXX may read fewer than requested */
785 read_random(ses->ses_iv, sizeof(ses->ses_iv));
786
787 ses->ses_klen = encini->cri_klen;
788 if (encini->cri_key != NULL)
789 safe_setup_enckey(ses, encini->cri_key);
790 }
791
792 if (macini) {
793 ses->ses_mlen = macini->cri_mlen;
794 if (ses->ses_mlen == 0) {
795 if (macini->cri_alg == CRYPTO_MD5_HMAC)
796 ses->ses_mlen = MD5_HASH_LEN;
797 else
798 ses->ses_mlen = SHA1_HASH_LEN;
799 }
800
801 if (macini->cri_key != NULL) {
802 safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
803 macini->cri_klen / 8);
804 }
805 }
806
807 *sidp = SAFE_SID(device_get_unit(sc->sc_dev), sesn);
808 return (0);
809 }
810
811 /*
812 * Deallocate a session.
813 */
814 static int
815 safe_freesession(device_t dev, u_int64_t tid)
816 {
817 struct safe_softc *sc = device_get_softc(dev);
818 int session, ret;
819 u_int32_t sid = ((u_int32_t) tid) & 0xffffffff;
820
821 if (sc == NULL)
822 return (EINVAL);
823
824 session = SAFE_SESSION(sid);
825 if (session < sc->sc_nsessions) {
826 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
827 ret = 0;
828 } else
829 ret = EINVAL;
830 return (ret);
831 }
832
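/*
 * bus_dmamap_load callback: record the mapped size and the segment list
 * for an operand (the source or destination buffer of a request).
 */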
833 static void
834 safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
835 {
836 struct safe_operand *op = arg;
837
838 DPRINTF(("%s: mapsize %u nsegs %d error %d\n", __func__,
839 (u_int) mapsize, nsegs, error));
840 if (error != 0)
841 return;
842 op->mapsize = mapsize;
843 op->nsegs = nsegs;
844 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
845 }
846
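/*
 * Process a crypto request: claim a ring entry, DMA-map the source (and,
 * when needed, destination) buffer, construct the SA command words and
 * packet descriptor, and post the descriptor to the packet engine.
 */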
847 static int
848 safe_process(device_t dev, struct cryptop *crp, int hint)
849 {
850 struct safe_softc *sc = device_get_softc(dev);
851 int err = 0, i, nicealign, uniform;
852 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
853 int bypass, oplen, ivsize;
854 caddr_t iv;
855 int16_t coffset;
856 struct safe_session *ses;
857 struct safe_ringentry *re;
858 struct safe_sarec *sa;
859 struct safe_pdesc *pd;
860 u_int32_t cmd0, cmd1, staterec;
861
862 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
863 safestats.st_invalid++;
864 return (EINVAL);
865 }
866 if (SAFE_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
867 safestats.st_badsession++;
868 return (EINVAL);
869 }
870
871 mtx_lock(&sc->sc_ringmtx);
872 if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
873 safestats.st_ringfull++;
874 sc->sc_needwakeup |= CRYPTO_SYMQ;
875 mtx_unlock(&sc->sc_ringmtx);
876 return (ERESTART);
877 }
878 re = sc->sc_front;
879
880 staterec = re->re_sa.sa_staterec; /* save */
881 /* NB: zero everything but the PE descriptor */
882 bzero(&re->re_sa, sizeof(struct safe_ringentry) - sizeof(re->re_desc));
883 re->re_sa.sa_staterec = staterec; /* restore */
884
885 re->re_crp = crp;
886 re->re_sesn = SAFE_SESSION(crp->crp_sid);
887
888 if (crp->crp_flags & CRYPTO_F_IMBUF) {
889 re->re_src_m = (struct mbuf *)crp->crp_buf;
890 re->re_dst_m = (struct mbuf *)crp->crp_buf;
891 } else if (crp->crp_flags & CRYPTO_F_IOV) {
892 re->re_src_io = (struct uio *)crp->crp_buf;
893 re->re_dst_io = (struct uio *)crp->crp_buf;
894 } else {
895 safestats.st_badflags++;
896 err = EINVAL;
897 goto errout; /* XXX we don't handle contiguous blocks! */
898 }
899
900 sa = &re->re_sa;
901 ses = &sc->sc_sessions[re->re_sesn];
902
903 crd1 = crp->crp_desc;
904 if (crd1 == NULL) {
905 safestats.st_nodesc++;
906 err = EINVAL;
907 goto errout;
908 }
909 crd2 = crd1->crd_next;
910
911 cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
912 cmd1 = 0;
913 if (crd2 == NULL) {
914 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
915 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
916 crd1->crd_alg == CRYPTO_NULL_HMAC) {
917 maccrd = crd1;
918 enccrd = NULL;
919 cmd0 |= SAFE_SA_CMD0_OP_HASH;
920 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
921 crd1->crd_alg == CRYPTO_3DES_CBC ||
922 crd1->crd_alg == CRYPTO_AES_CBC ||
923 crd1->crd_alg == CRYPTO_NULL_CBC) {
924 maccrd = NULL;
925 enccrd = crd1;
926 cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
927 } else {
928 safestats.st_badalg++;
929 err = EINVAL;
930 goto errout;
931 }
932 } else {
933 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
934 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
935 crd1->crd_alg == CRYPTO_NULL_HMAC) &&
936 (crd2->crd_alg == CRYPTO_DES_CBC ||
937 crd2->crd_alg == CRYPTO_3DES_CBC ||
938 crd2->crd_alg == CRYPTO_AES_CBC ||
939 crd2->crd_alg == CRYPTO_NULL_CBC) &&
940 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
941 maccrd = crd1;
942 enccrd = crd2;
943 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
944 crd1->crd_alg == CRYPTO_3DES_CBC ||
945 crd1->crd_alg == CRYPTO_AES_CBC ||
946 crd1->crd_alg == CRYPTO_NULL_CBC) &&
947 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
948 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
949 crd2->crd_alg == CRYPTO_NULL_HMAC) &&
950 (crd1->crd_flags & CRD_F_ENCRYPT)) {
951 enccrd = crd1;
952 maccrd = crd2;
953 } else {
954 safestats.st_badalg++;
955 err = EINVAL;
956 goto errout;
957 }
958 cmd0 |= SAFE_SA_CMD0_OP_BOTH;
959 }
960
961 if (enccrd) {
962 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
963 safe_setup_enckey(ses, enccrd->crd_key);
964
965 if (enccrd->crd_alg == CRYPTO_DES_CBC) {
966 cmd0 |= SAFE_SA_CMD0_DES;
967 cmd1 |= SAFE_SA_CMD1_CBC;
968 ivsize = 2*sizeof(u_int32_t);
969 } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
970 cmd0 |= SAFE_SA_CMD0_3DES;
971 cmd1 |= SAFE_SA_CMD1_CBC;
972 ivsize = 2*sizeof(u_int32_t);
973 } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
974 cmd0 |= SAFE_SA_CMD0_AES;
975 cmd1 |= SAFE_SA_CMD1_CBC;
976 if (ses->ses_klen == 128)
977 cmd1 |= SAFE_SA_CMD1_AES128;
978 else if (ses->ses_klen == 192)
979 cmd1 |= SAFE_SA_CMD1_AES192;
980 else
981 cmd1 |= SAFE_SA_CMD1_AES256;
982 ivsize = 4*sizeof(u_int32_t);
983 } else {
984 cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
985 ivsize = 0;
986 }
987
988 /*
989 * Setup encrypt/decrypt state. When using basic ops
990 * we can't use an inline IV because hash/crypt offset
991 * must be from the end of the IV to the start of the
992 * crypt data and this leaves out the preceding header
993 * from the hash calculation. Instead we place the IV
994 * in the state record and set the hash/crypt offset to
995 * copy both the header+IV.
996 */
997 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
998 cmd0 |= SAFE_SA_CMD0_OUTBOUND;
999
1000 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1001 iv = enccrd->crd_iv;
1002 else
1003 iv = (caddr_t) ses->ses_iv;
1004 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
1005 crypto_copyback(crp->crp_flags, crp->crp_buf,
1006 enccrd->crd_inject, ivsize, iv);
1007 }
1008 bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
1009 cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
1010 re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
1011 } else {
1012 cmd0 |= SAFE_SA_CMD0_INBOUND;
1013
1014 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
1015 bcopy(enccrd->crd_iv,
1016 re->re_sastate.sa_saved_iv, ivsize);
1017 } else {
1018 crypto_copydata(crp->crp_flags, crp->crp_buf,
1019 enccrd->crd_inject, ivsize,
1020 (caddr_t)re->re_sastate.sa_saved_iv);
1021 }
1022 cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
1023 }
1024 /*
1025 * For basic encryption use the zero pad algorithm.
1026 * This pads results to an 8-byte boundary and
1027 * suppresses padding verification for inbound (i.e.
1028 * decrypt) operations.
1029 *
1030 * NB: Not sure if the 8-byte pad boundary is a problem.
1031 */
1032 cmd0 |= SAFE_SA_CMD0_PAD_ZERO;
1033
1034 /* XXX assert key bufs have the same size */
1035 bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
1036 }
1037
1038 if (maccrd) {
1039 if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
1040 safe_setup_mackey(ses, maccrd->crd_alg,
1041 maccrd->crd_key, maccrd->crd_klen / 8);
1042 }
1043
1044 if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
1045 cmd0 |= SAFE_SA_CMD0_MD5;
1046 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
1047 } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
1048 cmd0 |= SAFE_SA_CMD0_SHA1;
1049 cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
1050 } else {
1051 cmd0 |= SAFE_SA_CMD0_HASH_NULL;
1052 }
1053 /*
1054 * Digest data is loaded from the SA and the hash
1055 * result is saved to the state block where we
1056 * retrieve it for return to the caller.
1057 */
1058 /* XXX assert digest bufs have the same size */
1059 bcopy(ses->ses_hminner, sa->sa_indigest,
1060 sizeof(sa->sa_indigest));
1061 bcopy(ses->ses_hmouter, sa->sa_outdigest,
1062 sizeof(sa->sa_outdigest));
1063
1064 cmd0 |= SAFE_SA_CMD0_HSLD_SA | SAFE_SA_CMD0_SAVEHASH;
1065 re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
1066 }
1067
1068 if (enccrd && maccrd) {
1069 /*
1070 * The offset from hash data to the start of
1071 * crypt data is the difference in the skips.
1072 */
1073 bypass = maccrd->crd_skip;
1074 coffset = enccrd->crd_skip - maccrd->crd_skip;
1075 if (coffset < 0) {
1076 DPRINTF(("%s: hash does not precede crypt; "
1077 "mac skip %u enc skip %u\n",
1078 __func__, maccrd->crd_skip, enccrd->crd_skip));
1079 safestats.st_skipmismatch++;
1080 err = EINVAL;
1081 goto errout;
1082 }
1083 oplen = enccrd->crd_skip + enccrd->crd_len;
1084 if (maccrd->crd_skip + maccrd->crd_len != oplen) {
1085 DPRINTF(("%s: hash amount %u != crypt amount %u\n",
1086 __func__, maccrd->crd_skip + maccrd->crd_len,
1087 oplen));
1088 safestats.st_lenmismatch++;
1089 err = EINVAL;
1090 goto errout;
1091 }
1092 #ifdef SAFE_DEBUG
1093 if (safe_debug) {
1094 printf("mac: skip %d, len %d, inject %d\n",
1095 maccrd->crd_skip, maccrd->crd_len,
1096 maccrd->crd_inject);
1097 printf("enc: skip %d, len %d, inject %d\n",
1098 enccrd->crd_skip, enccrd->crd_len,
1099 enccrd->crd_inject);
1100 printf("bypass %d coffset %d oplen %d\n",
1101 bypass, coffset, oplen);
1102 }
1103 #endif
1104 if (coffset & 3) { /* offset must be 32-bit aligned */
1105 DPRINTF(("%s: coffset %u misaligned\n",
1106 __func__, coffset));
1107 safestats.st_coffmisaligned++;
1108 err = EINVAL;
1109 goto errout;
1110 }
1111 coffset >>= 2;
1112 if (coffset > 255) { /* offset must be <256 dwords */
1113 DPRINTF(("%s: coffset %u too big\n",
1114 __func__, coffset));
1115 safestats.st_cofftoobig++;
1116 err = EINVAL;
1117 goto errout;
1118 }
1119 /*
1120 * Tell the hardware to copy the header to the output.
1121 * The header is defined as the data from the end of
1122 * the bypass to the start of data to be encrypted.
1123 * Typically this is the inline IV. Note that you need
1124 * to do this even if src+dst are the same; it appears
1125 * that w/o this bit the crypted data is written
1126 * immediately after the bypass data.
1127 */
1128 cmd1 |= SAFE_SA_CMD1_HDRCOPY;
1129 /*
1130 * Disable IP header mutable bit handling. This is
1131 * needed to get correct HMAC calculations.
1132 */
1133 cmd1 |= SAFE_SA_CMD1_MUTABLE;
1134 } else {
1135 if (enccrd) {
1136 bypass = enccrd->crd_skip;
1137 oplen = bypass + enccrd->crd_len;
1138 } else {
1139 bypass = maccrd->crd_skip;
1140 oplen = bypass + maccrd->crd_len;
1141 }
1142 coffset = 0;
1143 }
1144 /* XXX verify multiple of 4 when using s/g */
1145 if (bypass > 96) { /* bypass offset must be <= 96 bytes */
1146 DPRINTF(("%s: bypass %u too big\n", __func__, bypass));
1147 safestats.st_bypasstoobig++;
1148 err = EINVAL;
1149 goto errout;
1150 }
1151
1152 if (bus_dmamap_create(sc->sc_srcdmat, BUS_DMA_NOWAIT, &re->re_src_map)) {
1153 safestats.st_nomap++;
1154 err = ENOMEM;
1155 goto errout;
1156 }
1157 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1158 if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map,
1159 re->re_src_m, safe_op_cb,
1160 &re->re_src, BUS_DMA_NOWAIT) != 0) {
1161 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1162 re->re_src_map = NULL;
1163 safestats.st_noload++;
1164 err = ENOMEM;
1165 goto errout;
1166 }
1167 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1168 if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map,
1169 re->re_src_io, safe_op_cb,
1170 &re->re_src, BUS_DMA_NOWAIT) != 0) {
1171 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1172 re->re_src_map = NULL;
1173 safestats.st_noload++;
1174 err = ENOMEM;
1175 goto errout;
1176 }
1177 }
1178 nicealign = safe_dmamap_aligned(&re->re_src);
1179 uniform = safe_dmamap_uniform(&re->re_src);
1180
1181 DPRINTF(("src nicealign %u uniform %u nsegs %u\n",
1182 nicealign, uniform, re->re_src.nsegs));
1183 if (re->re_src.nsegs > 1) {
1184 re->re_desc.d_src = sc->sc_spalloc.dma_paddr +
1185 ((caddr_t) sc->sc_spfree - (caddr_t) sc->sc_spring);
1186 for (i = 0; i < re->re_src_nsegs; i++) {
1187 /* NB: no need to check if there's space */
1188 pd = sc->sc_spfree;
1189 if (++(sc->sc_spfree) == sc->sc_springtop)
1190 sc->sc_spfree = sc->sc_spring;
1191
1192 KASSERT((pd->pd_flags&3) == 0 ||
1193 (pd->pd_flags&3) == SAFE_PD_DONE,
1194 ("bogus source particle descriptor; flags %x",
1195 pd->pd_flags));
1196 pd->pd_addr = re->re_src_segs[i].ds_addr;
1197 pd->pd_size = re->re_src_segs[i].ds_len;
1198 pd->pd_flags = SAFE_PD_READY;
1199 }
1200 cmd0 |= SAFE_SA_CMD0_IGATHER;
1201 } else {
1202 /*
1203 * No need for gather, reference the operand directly.
1204 */
1205 re->re_desc.d_src = re->re_src_segs[0].ds_addr;
1206 }
1207
1208 if (enccrd == NULL && maccrd != NULL) {
1209 /*
1210 * Hash op; no destination needed.
1211 */
1212 } else {
1213 if (crp->crp_flags & CRYPTO_F_IOV) {
1214 if (!nicealign) {
1215 safestats.st_iovmisaligned++;
1216 err = EINVAL;
1217 goto errout;
1218 }
1219 if (uniform != 1) {
1220 /*
1221 * Source is not suitable for direct use as
1222 * the destination. Create a new scatter/gather
1223 * list based on the destination requirements
1224 * and check if that's ok.
1225 */
1226 if (bus_dmamap_create(sc->sc_dstdmat,
1227 BUS_DMA_NOWAIT, &re->re_dst_map)) {
1228 safestats.st_nomap++;
1229 err = ENOMEM;
1230 goto errout;
1231 }
1232 if (bus_dmamap_load_uio(sc->sc_dstdmat,
1233 re->re_dst_map, re->re_dst_io,
1234 safe_op_cb, &re->re_dst,
1235 BUS_DMA_NOWAIT) != 0) {
1236 bus_dmamap_destroy(sc->sc_dstdmat,
1237 re->re_dst_map);
1238 re->re_dst_map = NULL;
1239 safestats.st_noload++;
1240 err = ENOMEM;
1241 goto errout;
1242 }
1243 uniform = safe_dmamap_uniform(&re->re_dst);
1244 if (!uniform) {
1245 /*
1246 * There's no way to handle the DMA
1247 * requirements with this uio. We
1248 * could create a separate DMA area for
1249 * the result and then copy it back,
1250 * but for now we just bail and return
1251 * an error. Note that uio requests
1252 * > SAFE_MAX_DSIZE are handled because
1253 * the DMA map and segment list for the
1254 				 * destination will result in a
1255 * destination particle list that does
1256 * the necessary scatter DMA.
1257 */
1258 safestats.st_iovnotuniform++;
1259 err = EINVAL;
1260 goto errout;
1261 }
1262 } else
1263 re->re_dst = re->re_src;
1264 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1265 if (nicealign && uniform == 1) {
1266 /*
1267 * Source layout is suitable for direct
1268 * sharing of the DMA map and segment list.
1269 */
1270 re->re_dst = re->re_src;
1271 } else if (nicealign && uniform == 2) {
1272 /*
1273 * The source is properly aligned but requires a
1274 * different particle list to handle DMA of the
1275 * result. Create a new map and do the load to
1276 * create the segment list. The particle
1277 * descriptor setup code below will handle the
1278 * rest.
1279 */
1280 if (bus_dmamap_create(sc->sc_dstdmat,
1281 BUS_DMA_NOWAIT, &re->re_dst_map)) {
1282 safestats.st_nomap++;
1283 err = ENOMEM;
1284 goto errout;
1285 }
1286 if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
1287 re->re_dst_map, re->re_dst_m,
1288 safe_op_cb, &re->re_dst,
1289 BUS_DMA_NOWAIT) != 0) {
1290 bus_dmamap_destroy(sc->sc_dstdmat,
1291 re->re_dst_map);
1292 re->re_dst_map = NULL;
1293 safestats.st_noload++;
1294 err = ENOMEM;
1295 goto errout;
1296 }
1297 } else { /* !(aligned and/or uniform) */
1298 int totlen, len;
1299 struct mbuf *m, *top, **mp;
1300
1301 /*
1302 * DMA constraints require that we allocate a
1303 * new mbuf chain for the destination. We
1304 * allocate an entire new set of mbufs of
1305 * optimal/required size and then tell the
1306 * hardware to copy any bits that are not
1307 * created as a byproduct of the operation.
1308 */
1309 if (!nicealign)
1310 safestats.st_unaligned++;
1311 if (!uniform)
1312 safestats.st_notuniform++;
1313 totlen = re->re_src_mapsize;
1314 if (re->re_src_m->m_flags & M_PKTHDR) {
1315 len = MHLEN;
1316 MGETHDR(m, M_NOWAIT, MT_DATA);
1317 if (m && !m_dup_pkthdr(m, re->re_src_m,
1318 M_NOWAIT)) {
1319 m_free(m);
1320 m = NULL;
1321 }
1322 } else {
1323 len = MLEN;
1324 MGET(m, M_NOWAIT, MT_DATA);
1325 }
1326 if (m == NULL) {
1327 safestats.st_nombuf++;
1328 err = sc->sc_nqchip ? ERESTART : ENOMEM;
1329 goto errout;
1330 }
1331 if (totlen >= MINCLSIZE) {
1332 if (!(MCLGET(m, M_NOWAIT))) {
1333 m_free(m);
1334 safestats.st_nomcl++;
1335 err = sc->sc_nqchip ?
1336 ERESTART : ENOMEM;
1337 goto errout;
1338 }
1339 len = MCLBYTES;
1340 }
1341 m->m_len = len;
1342 top = NULL;
1343 mp = ⊤
1344
1345 while (totlen > 0) {
1346 if (top) {
1347 MGET(m, M_NOWAIT, MT_DATA);
1348 if (m == NULL) {
1349 m_freem(top);
1350 safestats.st_nombuf++;
1351 err = sc->sc_nqchip ?
1352 ERESTART : ENOMEM;
1353 goto errout;
1354 }
1355 len = MLEN;
1356 }
1357 if (top && totlen >= MINCLSIZE) {
1358 if (!(MCLGET(m, M_NOWAIT))) {
1359 *mp = m;
1360 m_freem(top);
1361 safestats.st_nomcl++;
1362 err = sc->sc_nqchip ?
1363 ERESTART : ENOMEM;
1364 goto errout;
1365 }
1366 len = MCLBYTES;
1367 }
1368 m->m_len = len = min(totlen, len);
1369 totlen -= len;
1370 *mp = m;
1371 mp = &m->m_next;
1372 }
1373 re->re_dst_m = top;
1374 if (bus_dmamap_create(sc->sc_dstdmat,
1375 BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
1376 safestats.st_nomap++;
1377 err = ENOMEM;
1378 goto errout;
1379 }
1380 if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
1381 re->re_dst_map, re->re_dst_m,
1382 safe_op_cb, &re->re_dst,
1383 BUS_DMA_NOWAIT) != 0) {
1384 bus_dmamap_destroy(sc->sc_dstdmat,
1385 re->re_dst_map);
1386 re->re_dst_map = NULL;
1387 safestats.st_noload++;
1388 err = ENOMEM;
1389 goto errout;
1390 }
1391 if (re->re_src.mapsize > oplen) {
1392 /*
1393 * There's data following what the
1394 * hardware will copy for us. If this
1395 * isn't just the ICV (that's going to
1396 * be written on completion), copy it
1397 * to the new mbufs
1398 */
1399 if (!(maccrd &&
1400 (re->re_src.mapsize-oplen) == 12 &&
1401 maccrd->crd_inject == oplen))
1402 safe_mcopy(re->re_src_m,
1403 re->re_dst_m,
1404 oplen);
1405 else
1406 safestats.st_noicvcopy++;
1407 }
1408 }
1409 } else {
1410 safestats.st_badflags++;
1411 err = EINVAL;
1412 goto errout;
1413 }
1414
1415 if (re->re_dst.nsegs > 1) {
1416 re->re_desc.d_dst = sc->sc_dpalloc.dma_paddr +
1417 ((caddr_t) sc->sc_dpfree - (caddr_t) sc->sc_dpring);
1418 for (i = 0; i < re->re_dst_nsegs; i++) {
1419 pd = sc->sc_dpfree;
1420 KASSERT((pd->pd_flags&3) == 0 ||
1421 (pd->pd_flags&3) == SAFE_PD_DONE,
1422 ("bogus dest particle descriptor; flags %x",
1423 pd->pd_flags));
1424 if (++(sc->sc_dpfree) == sc->sc_dpringtop)
1425 sc->sc_dpfree = sc->sc_dpring;
1426 pd->pd_addr = re->re_dst_segs[i].ds_addr;
1427 pd->pd_flags = SAFE_PD_READY;
1428 }
1429 cmd0 |= SAFE_SA_CMD0_OSCATTER;
1430 } else {
1431 /*
1432 * No need for scatter, reference the operand directly.
1433 */
1434 re->re_desc.d_dst = re->re_dst_segs[0].ds_addr;
1435 }
1436 }
1437
1438 /*
1439 	 * All done with setup; fill in the SA command words
1440 * and the packet engine descriptor. The operation
1441 * is now ready for submission to the hardware.
1442 */
1443 sa->sa_cmd0 = cmd0 | SAFE_SA_CMD0_IPCI | SAFE_SA_CMD0_OPCI;
1444 sa->sa_cmd1 = cmd1
1445 | (coffset << SAFE_SA_CMD1_OFFSET_S)
1446 | SAFE_SA_CMD1_SAREV1 /* Rev 1 SA data structure */
1447 | SAFE_SA_CMD1_SRPCI
1448 ;
1449 /*
1450 * NB: the order of writes is important here. In case the
1451 * chip is scanning the ring because of an outstanding request
1452 * it might nab this one too. In that case we need to make
1453 * sure the setup is complete before we write the length
1454 * field of the descriptor as it signals the descriptor is
1455 * ready for processing.
1456 */
1457 re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
1458 if (maccrd)
1459 re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
1460 re->re_desc.d_len = oplen
1461 | SAFE_PE_LEN_READY
1462 | (bypass << SAFE_PE_LEN_BYPASS_S)
1463 ;
1464
1465 safestats.st_ipackets++;
1466 safestats.st_ibytes += oplen;
1467
1468 if (++(sc->sc_front) == sc->sc_ringtop)
1469 sc->sc_front = sc->sc_ring;
1470
1471 /* XXX honor batching */
1472 safe_feed(sc, re);
1473 mtx_unlock(&sc->sc_ringmtx);
1474 return (0);
1475
1476 errout:
1477 if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
1478 m_freem(re->re_dst_m);
1479
1480 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1481 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1482 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1483 }
1484 if (re->re_src_map != NULL) {
1485 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1486 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1487 }
1488 mtx_unlock(&sc->sc_ringmtx);
1489 if (err != ERESTART) {
1490 crp->crp_etype = err;
1491 crypto_done(crp);
1492 } else {
1493 sc->sc_needwakeup |= CRYPTO_SYMQ;
1494 }
1495 return (err);
1496 }
1497
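/*
 * Completion handling for one ring entry: sync and unload the DMA maps,
 * copy back the IV and/or ICV when requested, and hand the finished
 * request back to the crypto framework.
 */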
1498 static void
1499 safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
1500 {
1501 struct cryptop *crp = (struct cryptop *)re->re_crp;
1502 struct cryptodesc *crd;
1503
1504 safestats.st_opackets++;
1505 safestats.st_obytes += re->re_dst.mapsize;
1506
1507 safe_dma_sync(&sc->sc_ringalloc,
1508 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1509 if (re->re_desc.d_csr & SAFE_PE_CSR_STATUS) {
1510 device_printf(sc->sc_dev, "csr 0x%x cmd0 0x%x cmd1 0x%x\n",
1511 re->re_desc.d_csr,
1512 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1);
1513 safestats.st_peoperr++;
1514 crp->crp_etype = EIO; /* something more meaningful? */
1515 }
1516 if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
1517 bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
1518 BUS_DMASYNC_POSTREAD);
1519 bus_dmamap_unload(sc->sc_dstdmat, re->re_dst_map);
1520 bus_dmamap_destroy(sc->sc_dstdmat, re->re_dst_map);
1521 }
1522 bus_dmamap_sync(sc->sc_srcdmat, re->re_src_map, BUS_DMASYNC_POSTWRITE);
1523 bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
1524 bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
1525
1526 /*
1527 	 * If the result was written to a different mbuf chain, swap
1528 * it in as the return value and reclaim the original.
1529 */
1530 if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
1531 m_freem(re->re_src_m);
1532 crp->crp_buf = (caddr_t)re->re_dst_m;
1533 }
1534
1535 if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
1536 /* copy out IV for future use */
1537 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1538 int ivsize;
1539
1540 if (crd->crd_alg == CRYPTO_DES_CBC ||
1541 crd->crd_alg == CRYPTO_3DES_CBC) {
1542 ivsize = 2*sizeof(u_int32_t);
1543 } else if (crd->crd_alg == CRYPTO_AES_CBC) {
1544 ivsize = 4*sizeof(u_int32_t);
1545 } else
1546 continue;
1547 crypto_copydata(crp->crp_flags, crp->crp_buf,
1548 crd->crd_skip + crd->crd_len - ivsize, ivsize,
1549 (caddr_t)sc->sc_sessions[re->re_sesn].ses_iv);
1550 break;
1551 }
1552 }
1553
1554 if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
1555 /* copy out ICV result */
1556 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1557 if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
1558 crd->crd_alg == CRYPTO_SHA1_HMAC ||
1559 crd->crd_alg == CRYPTO_NULL_HMAC))
1560 continue;
1561 if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
1562 /*
1563 * SHA-1 ICV's are byte-swapped; fix 'em up
1564 				 * before copying them to their destination.
1565 */
1566 re->re_sastate.sa_saved_indigest[0] =
1567 bswap32(re->re_sastate.sa_saved_indigest[0]);
1568 re->re_sastate.sa_saved_indigest[1] =
1569 bswap32(re->re_sastate.sa_saved_indigest[1]);
1570 re->re_sastate.sa_saved_indigest[2] =
1571 bswap32(re->re_sastate.sa_saved_indigest[2]);
1572 }
1573 crypto_copyback(crp->crp_flags, crp->crp_buf,
1574 crd->crd_inject,
1575 sc->sc_sessions[re->re_sesn].ses_mlen,
1576 (caddr_t)re->re_sastate.sa_saved_indigest);
1577 break;
1578 }
1579 }
1580 crypto_done(crp);
1581 }
1582
1583 /*
1584 * Copy all data past offset from srcm to dstm.
1585 */
1586 static void
1587 safe_mcopy(struct mbuf *srcm, struct mbuf *dstm, u_int offset)
1588 {
1589 u_int j, dlen, slen;
1590 caddr_t dptr, sptr;
1591
1592 /*
1593 * Advance src and dst to offset.
1594 */
1595 j = offset;
1596 while (j >= 0) {
1597 if (srcm->m_len > j)
1598 break;
1599 j -= srcm->m_len;
1600 srcm = srcm->m_next;
1601 if (srcm == NULL)
1602 return;
1603 }
1604 sptr = mtod(srcm, caddr_t) + j;
1605 slen = srcm->m_len - j;
1606
1607 j = offset;
1608 while (j >= 0) {
1609 if (dstm->m_len > j)
1610 break;
1611 j -= dstm->m_len;
1612 dstm = dstm->m_next;
1613 if (dstm == NULL)
1614 return;
1615 }
1616 dptr = mtod(dstm, caddr_t) + j;
1617 dlen = dstm->m_len - j;
1618
1619 /*
1620 * Copy everything that remains.
1621 */
1622 for (;;) {
1623 j = min(slen, dlen);
1624 bcopy(sptr, dptr, j);
1625 if (slen == j) {
1626 srcm = srcm->m_next;
1627 if (srcm == NULL)
1628 return;
1629 sptr = srcm->m_data;
1630 slen = srcm->m_len;
1631 } else
1632 sptr += j, slen -= j;
1633 if (dlen == j) {
1634 dstm = dstm->m_next;
1635 if (dstm == NULL)
1636 return;
1637 dptr = dstm->m_data;
1638 dlen = dstm->m_len;
1639 } else
1640 dptr += j, dlen -= j;
1641 }
1642 }
1643
1644 #ifndef SAFE_NO_RNG
1645 #define SAFE_RNG_MAXWAIT 1000
1646
1647 static void
1648 safe_rng_init(struct safe_softc *sc)
1649 {
1650 u_int32_t w, v;
1651 int i;
1652
1653 WRITE_REG(sc, SAFE_RNG_CTRL, 0);
1654 /* use default value according to the manual */
1655 WRITE_REG(sc, SAFE_RNG_CNFG, 0x834); /* magic from SafeNet */
1656 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1657
1658 /*
1659 	 * There is a bug in rev 1.0 of the 1140 whereby, when the RNG
1660 	 * is brought out of reset, the ready status flag does not
1661 	 * work until the RNG has finished its internal initialization.
1662 	 *
1663 	 * So in order to determine whether the device is through its
1664 	 * initialization, we must read the data register (using the
1665 	 * status reg in the read in case it is initialized).  Then read
1666 * the data register until it changes from the first read.
1667 * Once it changes read the data register until it changes
1668 * again. At this time the RNG is considered initialized.
1669 * This could take between 750ms - 1000ms in time.
1670 */
1671 i = 0;
1672 w = READ_REG(sc, SAFE_RNG_OUT);
1673 do {
1674 v = READ_REG(sc, SAFE_RNG_OUT);
1675 if (v != w) {
1676 w = v;
1677 break;
1678 }
1679 DELAY(10);
1680 } while (++i < SAFE_RNG_MAXWAIT);
1681
1682 	/* Wait until data changes again */
1683 i = 0;
1684 do {
1685 v = READ_REG(sc, SAFE_RNG_OUT);
1686 if (v != w)
1687 break;
1688 DELAY(10);
1689 } while (++i < SAFE_RNG_MAXWAIT);
1690 }
1691
1692 static __inline void
1693 safe_rng_disable_short_cycle(struct safe_softc *sc)
1694 {
1695 WRITE_REG(sc, SAFE_RNG_CTRL,
1696 READ_REG(sc, SAFE_RNG_CTRL) &~ SAFE_RNG_CTRL_SHORTEN);
1697 }
1698
1699 static __inline void
1700 safe_rng_enable_short_cycle(struct safe_softc *sc)
1701 {
1702 WRITE_REG(sc, SAFE_RNG_CTRL,
1703 READ_REG(sc, SAFE_RNG_CTRL) | SAFE_RNG_CTRL_SHORTEN);
1704 }
1705
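/*
 * Wait (bounded by SAFE_RNG_MAXWAIT polls) for the RNG status register
 * to clear, then return one 32-bit word from the output register.
 */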
1706 static __inline u_int32_t
1707 safe_rng_read(struct safe_softc *sc)
1708 {
1709 int i;
1710
1711 i = 0;
1712 while (READ_REG(sc, SAFE_RNG_STAT) != 0 && ++i < SAFE_RNG_MAXWAIT)
1713 ;
1714 return READ_REG(sc, SAFE_RNG_OUT);
1715 }
1716
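/*
 * Periodic callout: collect a buffer of RNG output, check the alarm
 * counter and re-tune the oscillator if too many alarms fired, then
 * pass the data to the harvest routine and reschedule.
 */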
1717 static void
1718 safe_rng(void *arg)
1719 {
1720 struct safe_softc *sc = arg;
1721 u_int32_t buf[SAFE_RNG_MAXBUFSIZ]; /* NB: maybe move to softc */
1722 u_int maxwords;
1723 int i;
1724
1725 safestats.st_rng++;
1726 /*
1727 * Fetch the next block of data.
1728 */
1729 maxwords = safe_rngbufsize;
1730 if (maxwords > SAFE_RNG_MAXBUFSIZ)
1731 maxwords = SAFE_RNG_MAXBUFSIZ;
1732 retry:
1733 for (i = 0; i < maxwords; i++)
1734 buf[i] = safe_rng_read(sc);
1735 /*
1736 * Check the comparator alarm count and reset the h/w if
1737 * it exceeds our threshold. This guards against the
1738 * hardware oscillators resonating with external signals.
1739 */
1740 if (READ_REG(sc, SAFE_RNG_ALM_CNT) > safe_rngmaxalarm) {
1741 u_int32_t freq_inc, w;
1742
1743 DPRINTF(("%s: alarm count %u exceeds threshold %u\n", __func__,
1744 READ_REG(sc, SAFE_RNG_ALM_CNT), safe_rngmaxalarm));
1745 safestats.st_rngalarm++;
1746 safe_rng_enable_short_cycle(sc);
1747 freq_inc = 18;
1748 for (i = 0; i < 64; i++) {
1749 w = READ_REG(sc, SAFE_RNG_CNFG);
1750 freq_inc = ((w + freq_inc) & 0x3fL);
1751 w = ((w & ~0x3fL) | freq_inc);
1752 WRITE_REG(sc, SAFE_RNG_CNFG, w);
1753
1754 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1755
1756 (void) safe_rng_read(sc);
1757 DELAY(25);
1758
1759 if (READ_REG(sc, SAFE_RNG_ALM_CNT) == 0) {
1760 safe_rng_disable_short_cycle(sc);
1761 goto retry;
1762 }
1763 freq_inc = 1;
1764 }
1765 safe_rng_disable_short_cycle(sc);
1766 } else
1767 WRITE_REG(sc, SAFE_RNG_ALM_CNT, 0);
1768
1769 (*sc->sc_harvest)(sc->sc_rndtest, buf, maxwords*sizeof (u_int32_t));
1770 callout_reset(&sc->sc_rngto,
1771 hz * (safe_rnginterval ? safe_rnginterval : 1), safe_rng, sc);
1772 }
1773 #endif /* SAFE_NO_RNG */
1774
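/*
 * bus_dmamap_load callback used by safe_dma_malloc to record the bus
 * address of the (single) mapped segment.
 */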
1775 static void
1776 safe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1777 {
1778 bus_addr_t *paddr = (bus_addr_t*) arg;
1779 *paddr = segs->ds_addr;
1780 }
1781
1782 static int
1783 safe_dma_malloc(
1784 struct safe_softc *sc,
1785 bus_size_t size,
1786 struct safe_dma_alloc *dma,
1787 int mapflags
1788 )
1789 {
1790 int r;
1791
1792 r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
1793 sizeof(u_int32_t), 0, /* alignment, bounds */
1794 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1795 BUS_SPACE_MAXADDR, /* highaddr */
1796 NULL, NULL, /* filter, filterarg */
1797 size, /* maxsize */
1798 1, /* nsegments */
1799 size, /* maxsegsize */
1800 BUS_DMA_ALLOCNOW, /* flags */
1801 NULL, NULL, /* locking */
1802 &dma->dma_tag);
1803 if (r != 0) {
1804 device_printf(sc->sc_dev, "safe_dma_malloc: "
1805 "bus_dma_tag_create failed; error %u\n", r);
1806 goto fail_0;
1807 }
1808
1809 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1810 BUS_DMA_NOWAIT, &dma->dma_map);
1811 if (r != 0) {
1812 device_printf(sc->sc_dev, "safe_dma_malloc: "
1813 "bus_dmammem_alloc failed; size %ju, error %u\n",
1814 (uintmax_t)size, r);
1815 goto fail_1;
1816 }
1817
1818 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1819 size,
1820 safe_dmamap_cb,
1821 &dma->dma_paddr,
1822 mapflags | BUS_DMA_NOWAIT);
1823 if (r != 0) {
1824 device_printf(sc->sc_dev, "safe_dma_malloc: "
1825 "bus_dmamap_load failed; error %u\n", r);
1826 goto fail_2;
1827 }
1828
1829 dma->dma_size = size;
1830 return (0);
1831
1832 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1833 fail_2:
1834 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1835 fail_1:
1836 bus_dma_tag_destroy(dma->dma_tag);
1837 fail_0:
1838 dma->dma_tag = NULL;
1839 return (r);
1840 }
1841
1842 static void
1843 safe_dma_free(struct safe_softc *sc, struct safe_dma_alloc *dma)
1844 {
1845 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1846 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1847 bus_dma_tag_destroy(dma->dma_tag);
1848 }
1849
1850 /*
1851  * Resets the board.  Values in the registers are left as is
1852 * from the reset (i.e. initial values are assigned elsewhere).
1853 */
1854 static void
1855 safe_reset_board(struct safe_softc *sc)
1856 {
1857 u_int32_t v;
1858 /*
1859 * Reset the device. The manual says no delay
1860 * is needed between marking and clearing reset.
1861 */
1862 v = READ_REG(sc, SAFE_PE_DMACFG) &~
1863 (SAFE_PE_DMACFG_PERESET | SAFE_PE_DMACFG_PDRRESET |
1864 SAFE_PE_DMACFG_SGRESET);
1865 WRITE_REG(sc, SAFE_PE_DMACFG, v
1866 | SAFE_PE_DMACFG_PERESET
1867 | SAFE_PE_DMACFG_PDRRESET
1868 | SAFE_PE_DMACFG_SGRESET);
1869 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1870 }
1871
1872 /*
1873 * Initialize registers we need to touch only once.
1874 */
1875 static void
1876 safe_init_board(struct safe_softc *sc)
1877 {
1878 u_int32_t v, dwords;
1879
1880 v = READ_REG(sc, SAFE_PE_DMACFG);
1881 v &=~ SAFE_PE_DMACFG_PEMODE;
1882 v |= SAFE_PE_DMACFG_FSENA /* failsafe enable */
1883 | SAFE_PE_DMACFG_GPRPCI /* gather ring on PCI */
1884 | SAFE_PE_DMACFG_SPRPCI /* scatter ring on PCI */
1885 | SAFE_PE_DMACFG_ESDESC /* endian-swap descriptors */
1886 | SAFE_PE_DMACFG_ESSA /* endian-swap SA's */
1887 | SAFE_PE_DMACFG_ESPDESC /* endian-swap part. desc's */
1888 ;
1889 WRITE_REG(sc, SAFE_PE_DMACFG, v);
1890 #if 0
1891 /* XXX select byte swap based on host byte order */
1892 WRITE_REG(sc, SAFE_ENDIAN, 0x1b);
1893 #endif
1894 if (sc->sc_chiprev == SAFE_REV(1,0)) {
1895 /*
1896 * Avoid large PCI DMA transfers. Rev 1.0 has a bug where
1897 * "target mode transfers" done while the chip is DMA'ing
1898 * >1020 bytes cause the hardware to lockup. To avoid this
1899 * we reduce the max PCI transfer size and use small source
1900 * particle descriptors (<= 256 bytes).
1901 */
1902 WRITE_REG(sc, SAFE_DMA_CFG, 256);
1903 device_printf(sc->sc_dev,
1904 "Reduce max DMA size to %u words for rev %u.%u WAR\n",
1905 (READ_REG(sc, SAFE_DMA_CFG)>>2) & 0xff,
1906 SAFE_REV_MAJ(sc->sc_chiprev),
1907 SAFE_REV_MIN(sc->sc_chiprev));
1908 }
1909
1910 /* NB: operands+results are overlaid */
1911 WRITE_REG(sc, SAFE_PE_PDRBASE, sc->sc_ringalloc.dma_paddr);
1912 WRITE_REG(sc, SAFE_PE_RDRBASE, sc->sc_ringalloc.dma_paddr);
1913 /*
1914 * Configure ring entry size and number of items in the ring.
1915 */
1916 KASSERT((sizeof(struct safe_ringentry) % sizeof(u_int32_t)) == 0,
1917 ("PE ring entry not 32-bit aligned!"));
1918 dwords = sizeof(struct safe_ringentry) / sizeof(u_int32_t);
1919 WRITE_REG(sc, SAFE_PE_RINGCFG,
1920 (dwords << SAFE_PE_RINGCFG_OFFSET_S) | SAFE_MAX_NQUEUE);
1921 WRITE_REG(sc, SAFE_PE_RINGPOLL, 0); /* disable polling */
1922
1923 WRITE_REG(sc, SAFE_PE_GRNGBASE, sc->sc_spalloc.dma_paddr);
1924 WRITE_REG(sc, SAFE_PE_SRNGBASE, sc->sc_dpalloc.dma_paddr);
1925 WRITE_REG(sc, SAFE_PE_PARTSIZE,
1926 (SAFE_TOTAL_DPART<<16) | SAFE_TOTAL_SPART);
1927 /*
1928 * NB: destination particles are fixed size. We use
1929 * an mbuf cluster and require all results go to
1930 * clusters or smaller.
1931 */
1932 WRITE_REG(sc, SAFE_PE_PARTCFG, SAFE_MAX_DSIZE);
1933
1934 /* it's now safe to enable PE mode, do it */
1935 WRITE_REG(sc, SAFE_PE_DMACFG, v | SAFE_PE_DMACFG_PEMODE);
1936
1937 /*
1938 * Configure hardware to use level-triggered interrupts and
1939 * to interrupt after each descriptor is processed.
1940 */
1941 WRITE_REG(sc, SAFE_HI_CFG, SAFE_HI_CFG_LEVEL);
1942 WRITE_REG(sc, SAFE_HI_DESC_CNT, 1);
1943 WRITE_REG(sc, SAFE_HI_MASK, SAFE_INT_PE_DDONE | SAFE_INT_PE_ERROR);
1944 }
1945
1946 /*
1947 * Init PCI registers
1948 */
1949 static void
1950 safe_init_pciregs(device_t dev)
1951 {
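	/* Intentionally empty; kept as a placeholder for PCI register setup. */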
1952 }
1953
1954 /*
1955 * Clean up after a chip crash.
1956  * It is assumed that the caller is in splimp().
1957 */
1958 static void
1959 safe_cleanchip(struct safe_softc *sc)
1960 {
1961
1962 if (sc->sc_nqchip != 0) {
1963 struct safe_ringentry *re = sc->sc_back;
1964
1965 while (re != sc->sc_front) {
1966 if (re->re_desc.d_csr != 0)
1967 safe_free_entry(sc, re);
1968 if (++re == sc->sc_ringtop)
1969 re = sc->sc_ring;
1970 }
1971 sc->sc_back = re;
1972 sc->sc_nqchip = 0;
1973 }
1974 }
1975
1976 /*
1977 * free a safe_q
1978 * It is assumed that the caller is within splimp().
1979 */
1980 static int
1981 safe_free_entry(struct safe_softc *sc, struct safe_ringentry *re)
1982 {
1983 struct cryptop *crp;
1984
1985 /*
1986 	 * Free the separately allocated destination mbuf, if any.
1987 */
1988 if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
1989 m_freem(re->re_dst_m);
1990
1991 crp = (struct cryptop *)re->re_crp;
1992
1993 re->re_desc.d_csr = 0;
1994
1995 crp->crp_etype = EFAULT;
1996 crypto_done(crp);
1997 return(0);
1998 }
1999
2000 /*
2001 * Routine to reset the chip and clean up.
2002 * It is assumed that the caller is in splimp()
2003 */
2004 static void
2005 safe_totalreset(struct safe_softc *sc)
2006 {
2007 safe_reset_board(sc);
2008 safe_init_board(sc);
2009 safe_cleanchip(sc);
2010 }
2011
2012 /*
2013  * Is the operand suitably aligned for direct DMA?  Each
2014 * segment must be aligned on a 32-bit boundary and all
2015 * but the last segment must be a multiple of 4 bytes.
2016 */
2017 static int
2018 safe_dmamap_aligned(const struct safe_operand *op)
2019 {
2020 int i;
2021
2022 for (i = 0; i < op->nsegs; i++) {
2023 if (op->segs[i].ds_addr & 3)
2024 return (0);
2025 if (i != (op->nsegs - 1) && (op->segs[i].ds_len & 3))
2026 return (0);
2027 }
2028 return (1);
2029 }
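/*
 * For example, segments (addr 0x1000, len 64) followed by (addr 0x2000,
 * len 10) pass the check: every address is 32-bit aligned and only the
 * final segment has a length that is not a multiple of 4.  A segment
 * starting at 0x1002, or a non-final segment of 10 bytes, would make
 * this routine return 0.
 */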
2030
2031 /*
2032 * Is the operand suitable for direct DMA as the destination
2033 * of an operation. The hardware requires that each ``particle''
2034 * but the last in an operation result have the same size. We
2035 * fix that size at SAFE_MAX_DSIZE bytes. This routine returns
2036  * 0 if some segment is not a multiple of this size, 1 if all
2037  * segments are exactly this size, or 2 if segments are at worst
2038  * a multiple of this size.
2039 */
2040 static int
2041 safe_dmamap_uniform(const struct safe_operand *op)
2042 {
2043 int result = 1;
2044
2045 if (op->nsegs > 0) {
2046 int i;
2047
2048 for (i = 0; i < op->nsegs-1; i++) {
2049 if (op->segs[i].ds_len % SAFE_MAX_DSIZE)
2050 return (0);
2051 if (op->segs[i].ds_len != SAFE_MAX_DSIZE)
2052 result = 2;
2053 }
2054 }
2055 return (result);
2056 }
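/*
 * For example, assuming SAFE_MAX_DSIZE is the mbuf cluster size (2048
 * bytes on typical configurations): segment lengths 2048/2048/500 give 1,
 * 4096/2048/500 give 2 (multiples, but not all exactly SAFE_MAX_DSIZE),
 * and 1000/2048/500 give 0.  The final segment's length is never examined.
 */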
2057
2058 #ifdef SAFE_DEBUG
2059 static void
2060 safe_dump_dmastatus(struct safe_softc *sc, const char *tag)
2061 {
2062 printf("%s: ENDIAN 0x%x SRC 0x%x DST 0x%x STAT 0x%x\n"
2063 , tag
2064 , READ_REG(sc, SAFE_DMA_ENDIAN)
2065 , READ_REG(sc, SAFE_DMA_SRCADDR)
2066 , READ_REG(sc, SAFE_DMA_DSTADDR)
2067 , READ_REG(sc, SAFE_DMA_STAT)
2068 );
2069 }
2070
2071 static void
2072 safe_dump_intrstate(struct safe_softc *sc, const char *tag)
2073 {
2074 printf("%s: HI_CFG 0x%x HI_MASK 0x%x HI_DESC_CNT 0x%x HU_STAT 0x%x HM_STAT 0x%x\n"
2075 , tag
2076 , READ_REG(sc, SAFE_HI_CFG)
2077 , READ_REG(sc, SAFE_HI_MASK)
2078 , READ_REG(sc, SAFE_HI_DESC_CNT)
2079 , READ_REG(sc, SAFE_HU_STAT)
2080 , READ_REG(sc, SAFE_HM_STAT)
2081 );
2082 }
2083
2084 static void
2085 safe_dump_ringstate(struct safe_softc *sc, const char *tag)
2086 {
2087 u_int32_t estat = READ_REG(sc, SAFE_PE_ERNGSTAT);
2088
2089 /* NB: assume caller has lock on ring */
2090 printf("%s: ERNGSTAT %x (next %u) back %lu front %lu\n",
2091 tag,
2092 estat, (estat >> SAFE_PE_ERNGSTAT_NEXT_S),
2093 (unsigned long)(sc->sc_back - sc->sc_ring),
2094 (unsigned long)(sc->sc_front - sc->sc_ring));
2095 }
2096
2097 static void
2098 safe_dump_request(struct safe_softc *sc, const char* tag, struct safe_ringentry *re)
2099 {
2100 int ix, nsegs;
2101
2102 ix = re - sc->sc_ring;
2103 printf("%s: %p (%u): csr %x src %x dst %x sa %x len %x\n"
2104 , tag
2105 , re, ix
2106 , re->re_desc.d_csr
2107 , re->re_desc.d_src
2108 , re->re_desc.d_dst
2109 , re->re_desc.d_sa
2110 , re->re_desc.d_len
2111 );
2112 if (re->re_src.nsegs > 1) {
2113 ix = (re->re_desc.d_src - sc->sc_spalloc.dma_paddr) /
2114 sizeof(struct safe_pdesc);
2115 for (nsegs = re->re_src.nsegs; nsegs; nsegs--) {
2116 printf(" spd[%u] %p: %p size %u flags %x"
2117 , ix, &sc->sc_spring[ix]
2118 , (caddr_t)(uintptr_t) sc->sc_spring[ix].pd_addr
2119 , sc->sc_spring[ix].pd_size
2120 , sc->sc_spring[ix].pd_flags
2121 );
2122 if (sc->sc_spring[ix].pd_size == 0)
2123 printf(" (zero!)");
2124 printf("\n");
2125 if (++ix == SAFE_TOTAL_SPART)
2126 ix = 0;
2127 }
2128 }
2129 if (re->re_dst.nsegs > 1) {
2130 ix = (re->re_desc.d_dst - sc->sc_dpalloc.dma_paddr) /
2131 sizeof(struct safe_pdesc);
2132 for (nsegs = re->re_dst.nsegs; nsegs; nsegs--) {
2133 printf(" dpd[%u] %p: %p flags %x\n"
2134 , ix, &sc->sc_dpring[ix]
2135 , (caddr_t)(uintptr_t) sc->sc_dpring[ix].pd_addr
2136 , sc->sc_dpring[ix].pd_flags
2137 );
2138 if (++ix == SAFE_TOTAL_DPART)
2139 ix = 0;
2140 }
2141 }
2142 printf("sa: cmd0 %08x cmd1 %08x staterec %x\n",
2143 re->re_sa.sa_cmd0, re->re_sa.sa_cmd1, re->re_sa.sa_staterec);
2144 printf("sa: key %x %x %x %x %x %x %x %x\n"
2145 , re->re_sa.sa_key[0]
2146 , re->re_sa.sa_key[1]
2147 , re->re_sa.sa_key[2]
2148 , re->re_sa.sa_key[3]
2149 , re->re_sa.sa_key[4]
2150 , re->re_sa.sa_key[5]
2151 , re->re_sa.sa_key[6]
2152 , re->re_sa.sa_key[7]
2153 );
2154 printf("sa: indigest %x %x %x %x %x\n"
2155 , re->re_sa.sa_indigest[0]
2156 , re->re_sa.sa_indigest[1]
2157 , re->re_sa.sa_indigest[2]
2158 , re->re_sa.sa_indigest[3]
2159 , re->re_sa.sa_indigest[4]
2160 );
2161 printf("sa: outdigest %x %x %x %x %x\n"
2162 , re->re_sa.sa_outdigest[0]
2163 , re->re_sa.sa_outdigest[1]
2164 , re->re_sa.sa_outdigest[2]
2165 , re->re_sa.sa_outdigest[3]
2166 , re->re_sa.sa_outdigest[4]
2167 );
2168 printf("sr: iv %x %x %x %x\n"
2169 , re->re_sastate.sa_saved_iv[0]
2170 , re->re_sastate.sa_saved_iv[1]
2171 , re->re_sastate.sa_saved_iv[2]
2172 , re->re_sastate.sa_saved_iv[3]
2173 );
2174 printf("sr: hashbc %u indigest %x %x %x %x %x\n"
2175 , re->re_sastate.sa_saved_hashbc
2176 , re->re_sastate.sa_saved_indigest[0]
2177 , re->re_sastate.sa_saved_indigest[1]
2178 , re->re_sastate.sa_saved_indigest[2]
2179 , re->re_sastate.sa_saved_indigest[3]
2180 , re->re_sastate.sa_saved_indigest[4]
2181 );
2182 }
2183
2184 static void
2185 safe_dump_ring(struct safe_softc *sc, const char *tag)
2186 {
2187 mtx_lock(&sc->sc_ringmtx);
2188 printf("\nSafeNet Ring State:\n");
2189 safe_dump_intrstate(sc, tag);
2190 safe_dump_dmastatus(sc, tag);
2191 safe_dump_ringstate(sc, tag);
2192 if (sc->sc_nqchip) {
2193 struct safe_ringentry *re = sc->sc_back;
2194 do {
2195 safe_dump_request(sc, tag, re);
2196 if (++re == sc->sc_ringtop)
2197 re = sc->sc_ring;
2198 } while (re != sc->sc_front);
2199 }
2200 mtx_unlock(&sc->sc_ringmtx);
2201 }
2202
2203 static int
2204 sysctl_hw_safe_dump(SYSCTL_HANDLER_ARGS)
2205 {
2206 char dmode[64];
2207 int error;
2208
2209 strncpy(dmode, "", sizeof(dmode) - 1);
2210 dmode[sizeof(dmode) - 1] = '\0';
2211 error = sysctl_handle_string(oidp, &dmode[0], sizeof(dmode), req);
2212
2213 if (error == 0 && req->newptr != NULL) {
2214 struct safe_softc *sc = safec;
2215
2216 if (!sc)
2217 return EINVAL;
2218 if (strncmp(dmode, "dma", 3) == 0)
2219 safe_dump_dmastatus(sc, "safe0");
2220 else if (strncmp(dmode, "int", 3) == 0)
2221 safe_dump_intrstate(sc, "safe0");
2222 else if (strncmp(dmode, "ring", 4) == 0)
2223 safe_dump_ring(sc, "safe0");
2224 else
2225 return EINVAL;
2226 }
2227 return error;
2228 }
2229 SYSCTL_PROC(_hw_safe, OID_AUTO, dump, CTLTYPE_STRING | CTLFLAG_RW,
2230 0, 0, sysctl_hw_safe_dump, "A", "Dump driver state");
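/*
 * With SAFE_DEBUG compiled in, the dump routines above can be driven from
 * userland by writing "dma", "int", or "ring" to this sysctl, e.g.
 * (assuming the node is rooted at hw.safe):
 *
 *	sysctl hw.safe.dump=ring
 */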
2231 #endif /* SAFE_DEBUG */