1 /* $FreeBSD$ */
2 /* $OpenBSD: hifn7751.c,v 1.120 2002/05/17 00:33:34 deraadt Exp $ */
3
4 /*
5 * Invertex AEON / Hifn 7751 driver
6 * Copyright (c) 1999 Invertex Inc. All rights reserved.
7 * Copyright (c) 1999 Theo de Raadt
8 * Copyright (c) 2000-2001 Network Security Technologies, Inc.
9 * http://www.netsec.net
10 * Copyright (c) 2003 Hifn Inc.
11 *
12 * This driver is based on a previous driver by Invertex, for which they
13 * requested: Please send any comments, feedback, bug-fixes, or feature
14 * requests to software@invertex.com.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 *
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. The name of the author may not be used to endorse or promote products
26 * derived from this software without specific prior written permission.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
33 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
34 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
35 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
36 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
37 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 * Effort sponsored in part by the Defense Advanced Research Projects
40 * Agency (DARPA) and Air Force Research Laboratory, Air Force
41 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
42 *
43 */
44
45 /*
46 * Driver for various Hifn encryption processors.
47 */
48 #include "opt_hifn.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/proc.h>
53 #include <sys/errno.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/mbuf.h>
57 #include <sys/sysctl.h>
58
59 #include <vm/vm.h>
60 #include <vm/pmap.h>
61
62 #include <machine/clock.h>
63 #include <machine/bus.h>
64 #include <machine/resource.h>
65 #include <sys/bus.h>
66 #include <sys/rman.h>
67
68 #include <opencrypto/cryptodev.h>
69 #include <sys/random.h>
70
71 #include <pci/pcivar.h>
72 #include <pci/pcireg.h>
73
74 #ifdef HIFN_RNDTEST
75 #include <dev/rndtest/rndtest.h>
76 #endif
77 #include <dev/hifn/hifn7751reg.h>
78 #include <dev/hifn/hifn7751var.h>
79
80 /*
81 * Prototypes and count for the pci_device structure
82 */
83 static int hifn_probe(device_t);
84 static int hifn_attach(device_t);
85 static int hifn_detach(device_t);
86 static int hifn_suspend(device_t);
87 static int hifn_resume(device_t);
88 static void hifn_shutdown(device_t);
89
90 static device_method_t hifn_methods[] = {
91 /* Device interface */
92 DEVMETHOD(device_probe, hifn_probe),
93 DEVMETHOD(device_attach, hifn_attach),
94 DEVMETHOD(device_detach, hifn_detach),
95 DEVMETHOD(device_suspend, hifn_suspend),
96 DEVMETHOD(device_resume, hifn_resume),
97 DEVMETHOD(device_shutdown, hifn_shutdown),
98
99 /* bus interface */
100 DEVMETHOD(bus_print_child, bus_generic_print_child),
101 DEVMETHOD(bus_driver_added, bus_generic_driver_added),
102
103 { 0, 0 }
104 };
105 static driver_t hifn_driver = {
106 "hifn",
107 hifn_methods,
108 sizeof (struct hifn_softc)
109 };
110 static devclass_t hifn_devclass;
111
112 DRIVER_MODULE(hifn, pci, hifn_driver, hifn_devclass, 0, 0);
113 MODULE_DEPEND(hifn, crypto, 1, 1, 1);
114 #ifdef HIFN_RNDTEST
115 MODULE_DEPEND(hifn, rndtest, 1, 1, 1);
116 #endif
117
118 static void hifn_reset_board(struct hifn_softc *, int);
119 static void hifn_reset_puc(struct hifn_softc *);
120 static void hifn_puc_wait(struct hifn_softc *);
121 static int hifn_enable_crypto(struct hifn_softc *);
122 static void hifn_set_retry(struct hifn_softc *sc);
123 static void hifn_init_dma(struct hifn_softc *);
124 static void hifn_init_pci_registers(struct hifn_softc *);
125 static int hifn_sramsize(struct hifn_softc *);
126 static int hifn_dramsize(struct hifn_softc *);
127 static int hifn_ramtype(struct hifn_softc *);
128 static void hifn_sessions(struct hifn_softc *);
129 static void hifn_intr(void *);
130 static u_int hifn_write_command(struct hifn_command *, u_int8_t *);
131 static u_int32_t hifn_next_signature(u_int32_t a, u_int cnt);
132 static int hifn_newsession(void *, u_int32_t *, struct cryptoini *);
133 static int hifn_freesession(void *, u_int64_t);
134 static int hifn_process(void *, struct cryptop *, int);
135 static void hifn_callback(struct hifn_softc *, struct hifn_command *, u_int8_t *);
136 static int hifn_crypto(struct hifn_softc *, struct hifn_command *, struct cryptop *, int);
137 static int hifn_readramaddr(struct hifn_softc *, int, u_int8_t *);
138 static int hifn_writeramaddr(struct hifn_softc *, int, u_int8_t *);
139 static int hifn_dmamap_load_src(struct hifn_softc *, struct hifn_command *);
140 static int hifn_dmamap_load_dst(struct hifn_softc *, struct hifn_command *);
141 static int hifn_init_pubrng(struct hifn_softc *);
142 #ifndef HIFN_NO_RNG
143 static void hifn_rng(void *);
144 #endif
145 static void hifn_tick(void *);
146 static void hifn_abort(struct hifn_softc *);
147 static void hifn_alloc_slot(struct hifn_softc *, int *, int *, int *, int *);
148
149 static void hifn_write_reg_0(struct hifn_softc *, bus_size_t, u_int32_t);
150 static void hifn_write_reg_1(struct hifn_softc *, bus_size_t, u_int32_t);
151
/*
 * Read a 32-bit register through BAR0 and invalidate the BAR0
 * last-written-register cache (sc_bar0_lastreg is presumably used by
 * hifn_write_reg_0() to elide redundant writes -- TODO confirm; any
 * read forces the next write to go to hardware).
 */
static __inline__ u_int32_t
READ_REG_0(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st0, sc->sc_sh0, reg);
	sc->sc_bar0_lastreg = (bus_size_t) -1;
	return (v);
}
159 #define WRITE_REG_0(sc, reg, val) hifn_write_reg_0(sc, reg, val)
160
/*
 * Read a 32-bit register through BAR1 and invalidate the BAR1
 * last-written-register cache (mirrors READ_REG_0 for the second
 * register window).
 */
static __inline__ u_int32_t
READ_REG_1(struct hifn_softc *sc, bus_size_t reg)
{
	u_int32_t v = bus_space_read_4(sc->sc_st1, sc->sc_sh1, reg);
	sc->sc_bar1_lastreg = (bus_size_t) -1;
	return (v);
}
168 #define WRITE_REG_1(sc, reg, val) hifn_write_reg_1(sc, reg, val)
169
170 SYSCTL_NODE(_hw, OID_AUTO, hifn, CTLFLAG_RD, 0, "Hifn driver parameters");
171
172 #ifdef HIFN_DEBUG
173 static int hifn_debug = 0;
174 SYSCTL_INT(_hw_hifn, OID_AUTO, debug, CTLFLAG_RW, &hifn_debug,
175 0, "control debugging msgs");
176 #endif
177
178 static struct hifn_stats hifnstats;
179 SYSCTL_STRUCT(_hw_hifn, OID_AUTO, stats, CTLFLAG_RD, &hifnstats,
180 hifn_stats, "driver statistics");
181 static int hifn_maxbatch = 1;
182 SYSCTL_INT(_hw_hifn, OID_AUTO, maxbatch, CTLFLAG_RW, &hifn_maxbatch,
183 0, "max ops to batch w/o interrupt");
184
185 /*
186 * Probe for a supported device. The PCI vendor and device
187 * IDs are used to detect devices we know how to handle.
188 */
189 static int
190 hifn_probe(device_t dev)
191 {
192 if (pci_get_vendor(dev) == PCI_VENDOR_INVERTEX &&
193 pci_get_device(dev) == PCI_PRODUCT_INVERTEX_AEON)
194 return (0);
195 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
196 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7751 ||
197 pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
198 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
199 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956 ||
200 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811))
201 return (0);
202 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
203 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751)
204 return (0);
205 return (ENXIO);
206 }
207
/*
 * bus_dmamap_load(9) callback: record the bus address of the first
 * DMA segment in the caller-supplied bus_addr_t.
 *
 * NOTE(review): nseg and error are ignored; the only load in this file
 * is of a single contiguous bus_dmamem_alloc() buffer, so one segment
 * is presumably guaranteed -- confirm, and consider asserting
 * error == 0 && nseg == 1.
 */
static void
hifn_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *paddr = (bus_addr_t*) arg;
	*paddr = segs->ds_addr;
}
214
215 static const char*
216 hifn_partname(struct hifn_softc *sc)
217 {
218 /* XXX sprintf numbers when not decoded */
219 switch (pci_get_vendor(sc->sc_dev)) {
220 case PCI_VENDOR_HIFN:
221 switch (pci_get_device(sc->sc_dev)) {
222 case PCI_PRODUCT_HIFN_6500: return "Hifn 6500";
223 case PCI_PRODUCT_HIFN_7751: return "Hifn 7751";
224 case PCI_PRODUCT_HIFN_7811: return "Hifn 7811";
225 case PCI_PRODUCT_HIFN_7951: return "Hifn 7951";
226 case PCI_PRODUCT_HIFN_7955: return "Hifn 7955";
227 case PCI_PRODUCT_HIFN_7956: return "Hifn 7956";
228 }
229 return "Hifn unknown-part";
230 case PCI_VENDOR_INVERTEX:
231 switch (pci_get_device(sc->sc_dev)) {
232 case PCI_PRODUCT_INVERTEX_AEON: return "Invertex AEON";
233 }
234 return "Invertex unknown-part";
235 case PCI_VENDOR_NETSEC:
236 switch (pci_get_device(sc->sc_dev)) {
237 case PCI_PRODUCT_NETSEC_7751: return "NetSec 7751";
238 }
239 return "NetSec unknown-part";
240 }
241 return "Unknown-vendor unknown-part";
242 }
243
244 static void
245 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
246 {
247 u_int32_t *p = (u_int32_t *)buf;
248 for (count /= sizeof (u_int32_t); count; count--)
249 add_true_randomness(*p++);
250 }
251
252 /*
253 * Attach an interface that successfully probed.
254 */
255 static int
256 hifn_attach(device_t dev)
257 {
258 struct hifn_softc *sc = device_get_softc(dev);
259 u_int32_t cmd;
260 caddr_t kva;
261 int rseg, rid;
262 char rbase;
263 u_int16_t ena, rev;
264
265 KASSERT(sc != NULL, ("hifn_attach: null software carrier!"));
266 bzero(sc, sizeof (*sc));
267 sc->sc_dev = dev;
268
269 /* XXX handle power management */
270
271 /*
272 * The 7951 and 795x have a random number generator and
273 * public key support; note this.
274 */
275 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
276 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7951 ||
277 pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
278 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
279 sc->sc_flags = HIFN_HAS_RNG | HIFN_HAS_PUBLIC;
280 /*
281 * The 7811 has a random number generator and
282 * we also note it's identity 'cuz of some quirks.
283 */
284 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
285 pci_get_device(dev) == PCI_PRODUCT_HIFN_7811)
286 sc->sc_flags |= HIFN_IS_7811 | HIFN_HAS_RNG;
287
288 /*
289 * The 795x parts support AES.
290 */
291 if (pci_get_vendor(dev) == PCI_VENDOR_HIFN &&
292 (pci_get_device(dev) == PCI_PRODUCT_HIFN_7955 ||
293 pci_get_device(dev) == PCI_PRODUCT_HIFN_7956))
294 sc->sc_flags |= HIFN_IS_7956 | HIFN_HAS_AES;
295
296 /*
297 * Configure support for memory-mapped access to
298 * registers and for DMA operations.
299 */
300 #define PCIM_ENA (PCIM_CMD_MEMEN|PCIM_CMD_BUSMASTEREN)
301 cmd = pci_read_config(dev, PCIR_COMMAND, 4);
302 cmd |= PCIM_ENA;
303 pci_write_config(dev, PCIR_COMMAND, cmd, 4);
304 cmd = pci_read_config(dev, PCIR_COMMAND, 4);
305 if ((cmd & PCIM_ENA) != PCIM_ENA) {
306 device_printf(dev, "failed to enable %s\n",
307 (cmd & PCIM_ENA) == 0 ?
308 "memory mapping & bus mastering" :
309 (cmd & PCIM_CMD_MEMEN) == 0 ?
310 "memory mapping" : "bus mastering");
311 goto fail_pci;
312 }
313 #undef PCIM_ENA
314
315 /*
316 * Setup PCI resources. Note that we record the bus
317 * tag and handle for each register mapping, this is
318 * used by the READ_REG_0, WRITE_REG_0, READ_REG_1,
319 * and WRITE_REG_1 macros throughout the driver.
320 */
321 rid = HIFN_BAR0;
322 sc->sc_bar0res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
323 0, ~0, 1, RF_ACTIVE);
324 if (sc->sc_bar0res == NULL) {
325 device_printf(dev, "cannot map bar%d register space\n", 0);
326 goto fail_pci;
327 }
328 sc->sc_st0 = rman_get_bustag(sc->sc_bar0res);
329 sc->sc_sh0 = rman_get_bushandle(sc->sc_bar0res);
330 sc->sc_bar0_lastreg = (bus_size_t) -1;
331
332 rid = HIFN_BAR1;
333 sc->sc_bar1res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
334 0, ~0, 1, RF_ACTIVE);
335 if (sc->sc_bar1res == NULL) {
336 device_printf(dev, "cannot map bar%d register space\n", 1);
337 goto fail_io0;
338 }
339 sc->sc_st1 = rman_get_bustag(sc->sc_bar1res);
340 sc->sc_sh1 = rman_get_bushandle(sc->sc_bar1res);
341 sc->sc_bar1_lastreg = (bus_size_t) -1;
342
343 hifn_set_retry(sc);
344
345 /*
346 * Setup the area where the Hifn DMA's descriptors
347 * and associated data structures.
348 */
349 if (bus_dma_tag_create(NULL, /* parent */
350 1, 0, /* alignment,boundary */
351 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
352 BUS_SPACE_MAXADDR, /* highaddr */
353 NULL, NULL, /* filter, filterarg */
354 HIFN_MAX_DMALEN, /* maxsize */
355 MAX_SCATTER, /* nsegments */
356 HIFN_MAX_SEGLEN, /* maxsegsize */
357 BUS_DMA_ALLOCNOW, /* flags */
358 &sc->sc_dmat)) {
359 device_printf(dev, "cannot allocate DMA tag\n");
360 goto fail_io1;
361 }
362 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
363 device_printf(dev, "cannot create dma map\n");
364 bus_dma_tag_destroy(sc->sc_dmat);
365 goto fail_io1;
366 }
367 if (bus_dmamem_alloc(sc->sc_dmat, (void**) &kva, BUS_DMA_NOWAIT, &sc->sc_dmamap)) {
368 device_printf(dev, "cannot alloc dma buffer\n");
369 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
370 bus_dma_tag_destroy(sc->sc_dmat);
371 goto fail_io1;
372 }
373 if (bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, kva,
374 sizeof (*sc->sc_dma),
375 hifn_dmamap_cb, &sc->sc_dma_physaddr,
376 BUS_DMA_NOWAIT)) {
377 device_printf(dev, "cannot load dma map\n");
378 bus_dmamem_free(sc->sc_dmat, kva, sc->sc_dmamap);
379 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
380 bus_dma_tag_destroy(sc->sc_dmat);
381 goto fail_io1;
382 }
383 sc->sc_dma = (struct hifn_dma *)kva;
384 bzero(sc->sc_dma, sizeof(*sc->sc_dma));
385
386 KASSERT(sc->sc_st0 != NULL, ("hifn_attach: null bar0 tag!"));
387 KASSERT(sc->sc_sh0 != NULL, ("hifn_attach: null bar0 handle!"));
388 KASSERT(sc->sc_st1 != NULL, ("hifn_attach: null bar1 tag!"));
389 KASSERT(sc->sc_sh1 != NULL, ("hifn_attach: null bar1 handle!"));
390
391 /*
392 * Reset the board and do the ``secret handshake''
393 * to enable the crypto support. Then complete the
394 * initialization procedure by setting up the interrupt
395 * and hooking in to the system crypto support so we'll
396 * get used for system services like the crypto device,
397 * IPsec, RNG device, etc.
398 */
399 hifn_reset_board(sc, 0);
400
401 if (hifn_enable_crypto(sc) != 0) {
402 device_printf(dev, "crypto enabling failed\n");
403 goto fail_mem;
404 }
405 hifn_reset_puc(sc);
406
407 hifn_init_dma(sc);
408 hifn_init_pci_registers(sc);
409
410 /* XXX can't dynamically determine ram type for 795x; force dram */
411 if (sc->sc_flags & HIFN_IS_7956)
412 sc->sc_drammodel = 1;
413 else if (hifn_ramtype(sc))
414 goto fail_mem;
415
416 if (sc->sc_drammodel == 0)
417 hifn_sramsize(sc);
418 else
419 hifn_dramsize(sc);
420
421 /*
422 * Workaround for NetSec 7751 rev A: half ram size because two
423 * of the address lines were left floating
424 */
425 if (pci_get_vendor(dev) == PCI_VENDOR_NETSEC &&
426 pci_get_device(dev) == PCI_PRODUCT_NETSEC_7751 &&
427 pci_get_revid(dev) == 0x61) /*XXX???*/
428 sc->sc_ramsize >>= 1;
429
430 /*
431 * Arrange the interrupt line.
432 */
433 rid = 0;
434 sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
435 0, ~0, 1, RF_SHAREABLE|RF_ACTIVE);
436 if (sc->sc_irq == NULL) {
437 device_printf(dev, "could not map interrupt\n");
438 goto fail_mem;
439 }
440 /*
441 * NB: Network code assumes we are blocked with splimp()
442 * so make sure the IRQ is marked appropriately.
443 */
444 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET,
445 hifn_intr, sc, &sc->sc_intrhand)) {
446 device_printf(dev, "could not setup interrupt\n");
447 goto fail_intr2;
448 }
449
450 hifn_sessions(sc);
451
452 /*
453 * NB: Keep only the low 16 bits; this masks the chip id
454 * from the 7951.
455 */
456 rev = READ_REG_1(sc, HIFN_1_REVID) & 0xffff;
457
458 rseg = sc->sc_ramsize / 1024;
459 rbase = 'K';
460 if (sc->sc_ramsize >= (1024 * 1024)) {
461 rbase = 'M';
462 rseg /= 1024;
463 }
464 device_printf(sc->sc_dev, "%s, rev %u, %d%cB %cram\n",
465 hifn_partname(sc), rev,
466 rseg, rbase, sc->sc_drammodel ? 'd' : 's');
467
468 sc->sc_cid = crypto_get_driverid(0);
469 if (sc->sc_cid < 0) {
470 device_printf(dev, "could not get crypto driver id\n");
471 goto fail_intr;
472 }
473
474 WRITE_REG_0(sc, HIFN_0_PUCNFG,
475 READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
476 ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
477
478 switch (ena) {
479 case HIFN_PUSTAT_ENA_2:
480 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0,
481 hifn_newsession, hifn_freesession, hifn_process, sc);
482 crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0,
483 hifn_newsession, hifn_freesession, hifn_process, sc);
484 if (sc->sc_flags & HIFN_HAS_AES)
485 crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0,
486 hifn_newsession, hifn_freesession,
487 hifn_process, sc);
488 /*FALLTHROUGH*/
489 case HIFN_PUSTAT_ENA_1:
490 crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0,
491 hifn_newsession, hifn_freesession, hifn_process, sc);
492 crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0,
493 hifn_newsession, hifn_freesession, hifn_process, sc);
494 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0,
495 hifn_newsession, hifn_freesession, hifn_process, sc);
496 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0,
497 hifn_newsession, hifn_freesession, hifn_process, sc);
498 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0,
499 hifn_newsession, hifn_freesession, hifn_process, sc);
500 break;
501 }
502
503 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
504 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
505
506 if (sc->sc_flags & (HIFN_HAS_PUBLIC | HIFN_HAS_RNG))
507 hifn_init_pubrng(sc);
508
509 /* NB: 1 means the callout runs w/o Giant locked */
510 callout_init(&sc->sc_tickto);
511 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
512
513 return (0);
514
515 fail_intr:
516 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
517 fail_intr2:
518 /* XXX don't store rid */
519 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
520 fail_mem:
521 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
522 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
523 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
524 bus_dma_tag_destroy(sc->sc_dmat);
525
526 /* Turn off DMA polling */
527 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
528 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
529 fail_io1:
530 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
531 fail_io0:
532 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
533 fail_pci:
534 return (ENXIO);
535 }
536
537 /*
538 * Detach an interface that successfully probed.
539 */
540 static int
541 hifn_detach(device_t dev)
542 {
543 struct hifn_softc *sc = device_get_softc(dev);
544 int s;
545
546 KASSERT(sc != NULL, ("hifn_detach: null software carrier!"));
547
548 s = splimp();
549
550 /*XXX other resources */
551 callout_stop(&sc->sc_tickto);
552 callout_stop(&sc->sc_rngto);
553 #ifdef HIFN_RNDTEST
554 if (sc->sc_rndtest)
555 rndtest_detach(sc->sc_rndtest);
556 #endif
557
558 /* Turn off DMA polling */
559 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
560 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
561
562 crypto_unregister_all(sc->sc_cid);
563
564 bus_generic_detach(dev); /*XXX should be no children, right? */
565
566 bus_teardown_intr(dev, sc->sc_irq, sc->sc_intrhand);
567 /* XXX don't store rid */
568 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
569
570 bus_dmamap_unload(sc->sc_dmat, sc->sc_dmamap);
571 bus_dmamem_free(sc->sc_dmat, sc->sc_dma, sc->sc_dmamap);
572 bus_dmamap_destroy(sc->sc_dmat, sc->sc_dmamap);
573 bus_dma_tag_destroy(sc->sc_dmat);
574
575 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR1, sc->sc_bar1res);
576 bus_release_resource(dev, SYS_RES_MEMORY, HIFN_BAR0, sc->sc_bar0res);
577
578 splx(s);
579
580 return (0);
581 }
582
583 /*
584 * Stop all chip I/O so that the kernel's probe routines don't
585 * get confused by errant DMAs when rebooting.
586 */
587 static void
588 hifn_shutdown(device_t dev)
589 {
590 #ifdef notyet
591 hifn_stop(device_get_softc(dev));
592 #endif
593 }
594
595 /*
596 * Device suspend routine. Stop the interface and save some PCI
597 * settings in case the BIOS doesn't restore them properly on
598 * resume.
599 */
600 static int
601 hifn_suspend(device_t dev)
602 {
603 struct hifn_softc *sc = device_get_softc(dev);
604 #ifdef notyet
605 int i;
606
607 hifn_stop(sc);
608 for (i = 0; i < 5; i++)
609 sc->saved_maps[i] = pci_read_config(dev, PCIR_MAPS + i * 4, 4);
610 sc->saved_biosaddr = pci_read_config(dev, PCIR_BIOS, 4);
611 sc->saved_intline = pci_read_config(dev, PCIR_INTLINE, 1);
612 sc->saved_cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
613 sc->saved_lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
614 #endif
615 sc->sc_suspended = 1;
616
617 return (0);
618 }
619
620 /*
621 * Device resume routine. Restore some PCI settings in case the BIOS
622 * doesn't, re-enable busmastering, and restart the interface if
623 * appropriate.
624 */
625 static int
626 hifn_resume(device_t dev)
627 {
628 struct hifn_softc *sc = device_get_softc(dev);
629 #ifdef notyet
630 int i;
631
632 /* better way to do this? */
633 for (i = 0; i < 5; i++)
634 pci_write_config(dev, PCIR_MAPS + i * 4, sc->saved_maps[i], 4);
635 pci_write_config(dev, PCIR_BIOS, sc->saved_biosaddr, 4);
636 pci_write_config(dev, PCIR_INTLINE, sc->saved_intline, 1);
637 pci_write_config(dev, PCIR_CACHELNSZ, sc->saved_cachelnsz, 1);
638 pci_write_config(dev, PCIR_LATTIMER, sc->saved_lattimer, 1);
639
640 /* reenable busmastering */
641 pci_enable_busmaster(dev);
642 pci_enable_io(dev, HIFN_RES);
643
644 /* reinitialize interface if necessary */
645 if (ifp->if_flags & IFF_UP)
646 rl_init(sc);
647 #endif
648 sc->sc_suspended = 0;
649
650 return (0);
651 }
652
/*
 * Initialize the public key engine and/or RNG, as advertised in
 * sc_flags.  Selects the entropy harvester (rndtest when configured,
 * otherwise the default pool feeder), resets the public-key/RNG unit
 * on non-7811 parts, enables the RNG and starts its polling callout,
 * and unmasks the public-key-done interrupt.
 *
 * Returns 0 on success, 1 if the public key unit fails to come out
 * of reset within ~100ms.
 */
static int
hifn_init_pubrng(struct hifn_softc *sc)
{
	u_int32_t r;
	int i;

#ifdef HIFN_RNDTEST
	sc->sc_rndtest = rndtest_attach(sc->sc_dev);
	if (sc->sc_rndtest)
		sc->sc_harvest = rndtest_harvest;
	else
		sc->sc_harvest = default_harvest;
#else
	sc->sc_harvest = default_harvest;
#endif
	if ((sc->sc_flags & HIFN_IS_7811) == 0) {
		/* Reset 7951 public key/rng engine */
		WRITE_REG_1(sc, HIFN_1_PUB_RESET,
		    READ_REG_1(sc, HIFN_1_PUB_RESET) | HIFN_PUBRST_RESET);

		/* Poll up to 100 x 1ms for the self-clearing reset bit. */
		for (i = 0; i < 100; i++) {
			DELAY(1000);
			if ((READ_REG_1(sc, HIFN_1_PUB_RESET) &
			    HIFN_PUBRST_RESET) == 0)
				break;
		}

		if (i == 100) {
			device_printf(sc->sc_dev, "public key init failed\n");
			return (1);
		}
	}

#ifndef HIFN_NO_RNG
	/* Enable the rng, if available */
	if (sc->sc_flags & HIFN_HAS_RNG) {
		if (sc->sc_flags & HIFN_IS_7811) {
			/* 7811: disable, reconfigure, then re-enable. */
			r = READ_REG_1(sc, HIFN_1_7811_RNGENA);
			if (r & HIFN_7811_RNGENA_ENA) {
				r &= ~HIFN_7811_RNGENA_ENA;
				WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
			}
			WRITE_REG_1(sc, HIFN_1_7811_RNGCFG,
			    HIFN_7811_RNGCFG_DEFL);
			r |= HIFN_7811_RNGENA_ENA;
			WRITE_REG_1(sc, HIFN_1_7811_RNGENA, r);
		} else
			WRITE_REG_1(sc, HIFN_1_RNG_CONFIG,
			    READ_REG_1(sc, HIFN_1_RNG_CONFIG) |
			    HIFN_RNGCFG_ENA);

		/* First read is discarded by hifn_rng(). */
		sc->sc_rngfirst = 1;
		/* Poll roughly every 10ms (at least once per tick). */
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		/* NB: 1 means the callout runs w/o Giant locked */
		callout_init(&sc->sc_rngto);
		callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
	}
#endif

	/* Enable public key engine, if available */
	if (sc->sc_flags & HIFN_HAS_PUBLIC) {
		WRITE_REG_1(sc, HIFN_1_PUB_IEN, HIFN_PUBIEN_DONE);
		sc->sc_dmaier |= HIFN_DMAIER_PUBDONE;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	return (0);
}
724
#ifndef HIFN_NO_RNG
/*
 * RNG polling callout: pull random words from the chip and feed them
 * to the configured harvester, then reschedule.  On the 7811 the FIFO
 * is drained (up to 5 word-pairs per pass) while the ready bit is set;
 * on other parts a single data register is sampled.  The first read
 * after enabling is discarded on both paths.  If the 7811 reports a
 * FIFO underflow the RNG is disabled (callout not rescheduled).
 */
static void
hifn_rng(void *vsc)
{
	/* NOTE(review): this macro appears vestigial -- nothing below uses it. */
#define RANDOM_BITS(n) (n)*sizeof (u_int32_t), (n)*sizeof (u_int32_t)*NBBY, 0
	struct hifn_softc *sc = vsc;
	u_int32_t sts, num[2];
	int i;

	if (sc->sc_flags & HIFN_IS_7811) {
		for (i = 0; i < 5; i++) {
			sts = READ_REG_1(sc, HIFN_1_7811_RNGSTS);
			if (sts & HIFN_7811_RNGSTS_UFL) {
				device_printf(sc->sc_dev,
				    "RNG underflow: disabling\n");
				return;
			}
			if ((sts & HIFN_7811_RNGSTS_RDY) == 0)
				break;

			/*
			 * There are at least two words in the RNG FIFO
			 * at this point.
			 */
			num[0] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			num[1] = READ_REG_1(sc, HIFN_1_7811_RNGDAT);
			/* NB: discard first data read */
			if (sc->sc_rngfirst)
				sc->sc_rngfirst = 0;
			else
				(*sc->sc_harvest)(sc->sc_rndtest,
				    num, sizeof (num));
		}
	} else {
		num[0] = READ_REG_1(sc, HIFN_1_RNG_DATA);

		/* NB: discard first data read */
		if (sc->sc_rngfirst)
			sc->sc_rngfirst = 0;
		else
			(*sc->sc_harvest)(sc->sc_rndtest,
			    num, sizeof (num[0]));
	}

	callout_reset(&sc->sc_rngto, sc->sc_rnghz, hifn_rng, sc);
#undef RANDOM_BITS
}
#endif
773
774 static void
775 hifn_puc_wait(struct hifn_softc *sc)
776 {
777 int i;
778
779 for (i = 5000; i > 0; i--) {
780 DELAY(1);
781 if (!(READ_REG_0(sc, HIFN_0_PUCTRL) & HIFN_PUCTRL_RESET))
782 break;
783 }
784 if (!i)
785 device_printf(sc->sc_dev, "proc unit did not reset\n");
786 }
787
788 /*
789 * Reset the processing unit.
790 */
791 static void
792 hifn_reset_puc(struct hifn_softc *sc)
793 {
794 /* Reset processing unit */
795 WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
796 hifn_puc_wait(sc);
797 }
798
799 /*
800 * Set the Retry and TRDY registers; note that we set them to
801 * zero because the 7811 locks up when forced to retry (section
802 * 3.6 of "Specification Update SU-0014-04". Not clear if we
803 * should do this for all Hifn parts, but it doesn't seem to hurt.
804 */
805 static void
806 hifn_set_retry(struct hifn_softc *sc)
807 {
808 /* NB: RETRY only responds to 8-bit reads/writes */
809 pci_write_config(sc->sc_dev, HIFN_RETRY_TIMEOUT, 0, 1);
810 pci_write_config(sc->sc_dev, HIFN_TRDY_TIMEOUT, 0, 4);
811 }
812
813 /*
814 * Resets the board. Values in the regesters are left as is
815 * from the reset (i.e. initial values are assigned elsewhere).
816 */
817 static void
818 hifn_reset_board(struct hifn_softc *sc, int full)
819 {
820 u_int32_t reg;
821
822 /*
823 * Set polling in the DMA configuration register to zero. 0x7 avoids
824 * resetting the board and zeros out the other fields.
825 */
826 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
827 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
828
829 /*
830 * Now that polling has been disabled, we have to wait 1 ms
831 * before resetting the board.
832 */
833 DELAY(1000);
834
835 /* Reset the DMA unit */
836 if (full) {
837 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MODE);
838 DELAY(1000);
839 } else {
840 WRITE_REG_1(sc, HIFN_1_DMA_CNFG,
841 HIFN_DMACNFG_MODE | HIFN_DMACNFG_MSTRESET);
842 hifn_reset_puc(sc);
843 }
844
845 KASSERT(sc->sc_dma != NULL, ("hifn_reset_board: null DMA tag!"));
846 bzero(sc->sc_dma, sizeof(*sc->sc_dma));
847
848 /* Bring dma unit out of reset */
849 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
850 HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
851
852 hifn_puc_wait(sc);
853 hifn_set_retry(sc);
854
855 if (sc->sc_flags & HIFN_IS_7811) {
856 for (reg = 0; reg < 1000; reg++) {
857 if (READ_REG_1(sc, HIFN_1_7811_MIPSRST) &
858 HIFN_MIPSRST_CRAMINIT)
859 break;
860 DELAY(1000);
861 }
862 if (reg == 1000)
863 printf(": cram init timeout\n");
864 }
865 }
866
/*
 * Advance the unlock-signature LFSR by "cnt" steps.
 *
 * Each step computes the parity of the state bits selected by the
 * tap mask 0x80080125 and shifts it into the low bit of the state.
 * Used by hifn_enable_crypto() to derive the values written during
 * the unlock handshake.
 */
static u_int32_t
hifn_next_signature(u_int32_t a, u_int cnt)
{
	u_int32_t parity;
	u_int round;

	for (round = 0; round < cnt; round++) {
		/* Fold the tapped bits down to a single parity bit. */
		parity = a & 0x80080125;
		parity ^= parity >> 16;
		parity ^= parity >> 8;
		parity ^= parity >> 4;
		parity ^= parity >> 2;
		parity ^= parity >> 1;

		a = (a << 1) ^ (parity & 1);
	}

	return a;
}
888
/*
 * Per-device unlock keys: card_id is the 13-byte sequence fed, one
 * byte at a time, through hifn_next_signature() during the crypto
 * unlock handshake in hifn_enable_crypto().  All currently known
 * parts use the all-zeros key.
 */
struct pci2id {
	u_short pci_vendor;
	u_short pci_prod;
	char card_id[13];
};
static struct pci2id pci2id[] = {
	{
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7951,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7955,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7956,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_NETSEC,
		PCI_PRODUCT_NETSEC_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_INVERTEX,
		PCI_PRODUCT_INVERTEX_AEON,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7811,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	}, {
		/*
		 * Other vendors share this PCI ID as well, such as
		 * http://www.powercrypt.com, and obviously they also
		 * use the same key.
		 */
		PCI_VENDOR_HIFN,
		PCI_PRODUCT_HIFN_7751,
		{ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		  0x00, 0x00, 0x00, 0x00, 0x00 }
	},
};
937
938 /*
939 * Checks to see if crypto is already enabled. If crypto isn't enable,
940 * "hifn_enable_crypto" is called to enable it. The check is important,
941 * as enabling crypto twice will lock the board.
942 */
943 static int
944 hifn_enable_crypto(struct hifn_softc *sc)
945 {
946 u_int32_t dmacfg, ramcfg, encl, addr, i;
947 char *offtbl = NULL;
948
949 for (i = 0; i < sizeof(pci2id)/sizeof(pci2id[0]); i++) {
950 if (pci2id[i].pci_vendor == pci_get_vendor(sc->sc_dev) &&
951 pci2id[i].pci_prod == pci_get_device(sc->sc_dev)) {
952 offtbl = pci2id[i].card_id;
953 break;
954 }
955 }
956 if (offtbl == NULL) {
957 device_printf(sc->sc_dev, "Unknown card!\n");
958 return (1);
959 }
960
961 ramcfg = READ_REG_0(sc, HIFN_0_PUCNFG);
962 dmacfg = READ_REG_1(sc, HIFN_1_DMA_CNFG);
963
964 /*
965 * The RAM config register's encrypt level bit needs to be set before
966 * every read performed on the encryption level register.
967 */
968 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
969
970 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
971
972 /*
973 * Make sure we don't re-unlock. Two unlocks kills chip until the
974 * next reboot.
975 */
976 if (encl == HIFN_PUSTAT_ENA_1 || encl == HIFN_PUSTAT_ENA_2) {
977 #ifdef HIFN_DEBUG
978 if (hifn_debug)
979 device_printf(sc->sc_dev,
980 "Strong crypto already enabled!\n");
981 #endif
982 goto report;
983 }
984
985 if (encl != 0 && encl != HIFN_PUSTAT_ENA_0) {
986 #ifdef HIFN_DEBUG
987 if (hifn_debug)
988 device_printf(sc->sc_dev,
989 "Unknown encryption level 0x%x\n", encl);
990 #endif
991 return 1;
992 }
993
994 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_UNLOCK |
995 HIFN_DMACNFG_MSTRESET | HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE);
996 DELAY(1000);
997 addr = READ_REG_1(sc, HIFN_UNLOCK_SECRET1);
998 DELAY(1000);
999 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, 0);
1000 DELAY(1000);
1001
1002 for (i = 0; i <= 12; i++) {
1003 addr = hifn_next_signature(addr, offtbl[i] + 0x101);
1004 WRITE_REG_1(sc, HIFN_UNLOCK_SECRET2, addr);
1005
1006 DELAY(1000);
1007 }
1008
1009 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg | HIFN_PUCNFG_CHIPID);
1010 encl = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
1011
1012 #ifdef HIFN_DEBUG
1013 if (hifn_debug) {
1014 if (encl != HIFN_PUSTAT_ENA_1 && encl != HIFN_PUSTAT_ENA_2)
1015 device_printf(sc->sc_dev, "Engine is permanently "
1016 "locked until next system reset!\n");
1017 else
1018 device_printf(sc->sc_dev, "Engine enabled "
1019 "successfully!\n");
1020 }
1021 #endif
1022
1023 report:
1024 WRITE_REG_0(sc, HIFN_0_PUCNFG, ramcfg);
1025 WRITE_REG_1(sc, HIFN_1_DMA_CNFG, dmacfg);
1026
1027 switch (encl) {
1028 case HIFN_PUSTAT_ENA_1:
1029 case HIFN_PUSTAT_ENA_2:
1030 break;
1031 case HIFN_PUSTAT_ENA_0:
1032 default:
1033 device_printf(sc->sc_dev, "disabled");
1034 break;
1035 }
1036
1037 return 0;
1038 }
1039
/*
 * Give initial values to the registers listed in the "Register Space"
 * section of the HIFN Software Development reference manual.
 *
 * Programs the public-unit and DMA register files after a reset:
 * ring base addresses, a full status-clear/engine-disable write,
 * the interrupt-enable mask cached in sc_dmaier, RAM/PLL configuration
 * (7955/7956 vs. older parts), and finally the DMA mode/poll settings.
 * NOTE(review): the write ordering and the DELAY below follow the
 * manual's init sequence — do not reorder.
 */
static void
hifn_init_pci_registers(struct hifn_softc *sc)
{
	/* write fixed values needed by the Initialization registers */
	WRITE_REG_0(sc, HIFN_0_PUCTRL, HIFN_PUCTRL_DMAENA);
	WRITE_REG_0(sc, HIFN_0_FIFOCNFG, HIFN_FIFOCNFG_THRESHOLD);
	WRITE_REG_0(sc, HIFN_0_PUIER, HIFN_PUIER_DSTOVER);

	/*
	 * Write all 4 ring address registers.  The rings live inside the
	 * single physically-contiguous hifn_dma area, so each register is
	 * the shared physical base plus the ring's structure offset.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, cmdr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_SRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, srcr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_DRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, dstr[0]));
	WRITE_REG_1(sc, HIFN_1_DMA_RRAR, sc->sc_dma_physaddr +
	    offsetof(struct hifn_dma, resr[0]));

	/* let the ring addresses settle before touching the CSR */
	DELAY(2000);

	/*
	 * Write status register: disable all four DMA engines and clear
	 * every latched status bit (abort/done/last/wait/over), plus the
	 * optional public-unit and 7811 illegal-access bits when present.
	 */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR,
	    HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS |
	    HIFN_DMACSR_S_CTRL_DIS | HIFN_DMACSR_C_CTRL_DIS |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_D_DONE | HIFN_DMACSR_D_LAST |
	    HIFN_DMACSR_D_WAIT | HIFN_DMACSR_D_OVER |
	    HIFN_DMACSR_R_ABORT | HIFN_DMACSR_R_DONE | HIFN_DMACSR_R_LAST |
	    HIFN_DMACSR_R_WAIT | HIFN_DMACSR_R_OVER |
	    HIFN_DMACSR_S_ABORT | HIFN_DMACSR_S_DONE | HIFN_DMACSR_S_LAST |
	    HIFN_DMACSR_S_WAIT |
	    HIFN_DMACSR_C_ABORT | HIFN_DMACSR_C_DONE | HIFN_DMACSR_C_LAST |
	    HIFN_DMACSR_C_WAIT |
	    HIFN_DMACSR_ENGINE |
	    ((sc->sc_flags & HIFN_HAS_PUBLIC) ?
		HIFN_DMACSR_PUBDONE : 0) |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMACSR_ILLW | HIFN_DMACSR_ILLR : 0));

	/* engines are now idle; reset the software busy flags to match */
	sc->sc_d_busy = sc->sc_r_busy = sc->sc_s_busy = sc->sc_c_busy = 0;
	/*
	 * Build the interrupt-enable mask: result-done plus all aborts and
	 * overruns.  C_WAIT is managed dynamically by hifn_crypto()/
	 * hifn_intr(), so make sure it starts off.
	 */
	sc->sc_dmaier |= HIFN_DMAIER_R_DONE | HIFN_DMAIER_C_ABORT |
	    HIFN_DMAIER_D_OVER | HIFN_DMAIER_R_OVER |
	    HIFN_DMAIER_S_ABORT | HIFN_DMAIER_D_ABORT | HIFN_DMAIER_R_ABORT |
	    ((sc->sc_flags & HIFN_IS_7811) ?
		HIFN_DMAIER_ILLW | HIFN_DMAIER_ILLR : 0);
	sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
	WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);


	if (sc->sc_flags & HIFN_IS_7956) {
		/* 7955/7956: internal context ram only, and a PLL to set up */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32);
		WRITE_REG_1(sc, HIFN_1_PLL, HIFN_PLL_7956);
	} else {
		/* older parts: select external DRAM or SRAM as probed */
		WRITE_REG_0(sc, HIFN_0_PUCNFG, HIFN_PUCNFG_COMPSING |
		    HIFN_PUCNFG_DRFR_128 | HIFN_PUCNFG_TCALLPHASES |
		    HIFN_PUCNFG_TCDRVTOTEM | HIFN_PUCNFG_BUS32 |
		    (sc->sc_drammodel ? HIFN_PUCNFG_DRAM : HIFN_PUCNFG_SRAM));
	}

	/* clear any pending destination-overrun, then set DMA mode/polling */
	WRITE_REG_0(sc, HIFN_0_PUISR, HIFN_PUISR_DSTOVER);
	WRITE_REG_1(sc, HIFN_1_DMA_CNFG, HIFN_DMACNFG_MSTRESET |
	    HIFN_DMACNFG_DMARESET | HIFN_DMACNFG_MODE | HIFN_DMACNFG_LAST |
	    ((HIFN_POLL_FREQUENCY << 16 ) & HIFN_DMACNFG_POLLFREQ) |
	    ((HIFN_POLL_SCALAR << 8) & HIFN_DMACNFG_POLLINVAL));
}
1110
1111 /*
1112 * The maximum number of sessions supported by the card
1113 * is dependent on the amount of context ram, which
1114 * encryption algorithms are enabled, and how compression
1115 * is configured. This should be configured before this
1116 * routine is called.
1117 */
1118 static void
1119 hifn_sessions(struct hifn_softc *sc)
1120 {
1121 u_int32_t pucnfg;
1122 int ctxsize;
1123
1124 pucnfg = READ_REG_0(sc, HIFN_0_PUCNFG);
1125
1126 if (pucnfg & HIFN_PUCNFG_COMPSING) {
1127 if (pucnfg & HIFN_PUCNFG_ENCCNFG)
1128 ctxsize = 128;
1129 else
1130 ctxsize = 512;
1131 /*
1132 * 7955/7956 has internal context memory of 32K
1133 */
1134 if (sc->sc_flags & HIFN_IS_7956)
1135 sc->sc_maxses = 32768 / ctxsize;
1136 else
1137 sc->sc_maxses = 1 +
1138 ((sc->sc_ramsize - 32768) / ctxsize);
1139 } else
1140 sc->sc_maxses = sc->sc_ramsize / 16384;
1141
1142 if (sc->sc_maxses > 2048)
1143 sc->sc_maxses = 2048;
1144 }
1145
1146 /*
1147 * Determine ram type (sram or dram). Board should be just out of a reset
1148 * state when this is called.
1149 */
1150 static int
1151 hifn_ramtype(struct hifn_softc *sc)
1152 {
1153 u_int8_t data[8], dataexpect[8];
1154 int i;
1155
1156 for (i = 0; i < sizeof(data); i++)
1157 data[i] = dataexpect[i] = 0x55;
1158 if (hifn_writeramaddr(sc, 0, data))
1159 return (-1);
1160 if (hifn_readramaddr(sc, 0, data))
1161 return (-1);
1162 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1163 sc->sc_drammodel = 1;
1164 return (0);
1165 }
1166
1167 for (i = 0; i < sizeof(data); i++)
1168 data[i] = dataexpect[i] = 0xaa;
1169 if (hifn_writeramaddr(sc, 0, data))
1170 return (-1);
1171 if (hifn_readramaddr(sc, 0, data))
1172 return (-1);
1173 if (bcmp(data, dataexpect, sizeof(data)) != 0) {
1174 sc->sc_drammodel = 1;
1175 return (0);
1176 }
1177
1178 return (0);
1179 }
1180
#define	HIFN_SRAM_MAX		(32 << 20)	/* probe at most 32MB of sram */
#define	HIFN_SRAM_STEP_SIZE	16384		/* probe granularity (bytes) */
#define	HIFN_SRAM_GRANULARITY	(HIFN_SRAM_MAX / HIFN_SRAM_STEP_SIZE)

/*
 * Size external sram by writing a distinct marker to every 16K step
 * (highest address first) and then reading them back in ascending
 * order; sc_ramsize ends up just past the last step whose marker
 * survived.  Aliasing in a smaller ram makes a low write clobber a
 * high one, which stops the read-back scan.  Always returns 0 --
 * failure simply leaves sc_ramsize at the last verified size.
 */
static int
hifn_sramsize(struct hifn_softc *sc)
{
	u_int32_t a;
	u_int8_t data[8];
	u_int8_t dataexpect[sizeof(data)];
	int32_t i;

	for (i = 0; i < sizeof(data); i++)
		data[i] = dataexpect[i] = i ^ 0x5a;

	/*
	 * Write phase: the step index overwrites the first sizeof(i)
	 * bytes of the pattern in host byte order; the read phase
	 * builds its expected value the same way, so the encoding is
	 * consistent on both sides.
	 */
	for (i = HIFN_SRAM_GRANULARITY - 1; i >= 0; i--) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, data, sizeof(i));
		hifn_writeramaddr(sc, a, data);
	}

	/* read phase: grow sc_ramsize while each marker checks out */
	for (i = 0; i < HIFN_SRAM_GRANULARITY; i++) {
		a = i * HIFN_SRAM_STEP_SIZE;
		bcopy(&i, dataexpect, sizeof(i));
		if (hifn_readramaddr(sc, a, data) < 0)
			return (0);
		if (bcmp(data, dataexpect, sizeof(data)) != 0)
			return (0);
		sc->sc_ramsize = a + HIFN_SRAM_STEP_SIZE;
	}

	return (0);
}
1214
1215 /*
1216 * XXX For dram boards, one should really try all of the
1217 * HIFN_PUCNFG_DSZ_*'s. This just assumes that PUCNFG
1218 * is already set up correctly.
1219 */
1220 static int
1221 hifn_dramsize(struct hifn_softc *sc)
1222 {
1223 u_int32_t cnfg;
1224
1225 if (sc->sc_flags & HIFN_IS_7956) {
1226 /*
1227 * 7955/7956 have a fixed internal ram of only 32K.
1228 */
1229 sc->sc_ramsize = 32768;
1230 } else {
1231 cnfg = READ_REG_0(sc, HIFN_0_PUCNFG) &
1232 HIFN_PUCNFG_DRAMMASK;
1233 sc->sc_ramsize = 1 << ((cnfg >> 13) + 18);
1234 }
1235 return (0);
1236 }
1237
/*
 * Claim one slot in each of the four descriptor rings (command, source,
 * destination, result), returning the claimed indices through the out
 * parameters.  When a ring's producer index has reached the end, the
 * extra trailing descriptor is turned into a JUMP back to the start and
 * the index wraps to 0.  The "k" (cleanup) index is dragged along with
 * the producer index for each ring.
 *
 * NOTE(review): no free-space check is done here -- callers are expected
 * to have verified ring occupancy first.
 */
static void
hifn_alloc_slot(struct hifn_softc *sc, int *cmdp, int *srcp, int *dstp, int *resp)
{
	struct hifn_dma *dma = sc->sc_dma;

	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		/* wrap: arm the jump descriptor and restart at slot 0 */
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*cmdp = dma->cmdi++;
	dma->cmdk = dma->cmdi;

	if (dma->srci == HIFN_D_SRC_RSIZE) {
		dma->srci = 0;
		dma->srcr[HIFN_D_SRC_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*srcp = dma->srci++;
	dma->srck = dma->srci;

	if (dma->dsti == HIFN_D_DST_RSIZE) {
		dma->dsti = 0;
		dma->dstr[HIFN_D_DST_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, HIFN_D_DST_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*dstp = dma->dsti++;
	dma->dstk = dma->dsti;

	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	*resp = dma->resi++;
	dma->resk = dma->resi;
}
1283
1284 static int
1285 hifn_writeramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1286 {
1287 struct hifn_dma *dma = sc->sc_dma;
1288 hifn_base_command_t wc;
1289 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1290 int r, cmdi, resi, srci, dsti;
1291
1292 wc.masks = htole16(3 << 13);
1293 wc.session_num = htole16(addr >> 14);
1294 wc.total_source_count = htole16(8);
1295 wc.total_dest_count = htole16(addr & 0x3fff);
1296
1297 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1298
1299 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1300 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1301 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1302
1303 /* build write command */
1304 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1305 *(hifn_base_command_t *)dma->command_bufs[cmdi] = wc;
1306 bcopy(data, &dma->test_src, sizeof(dma->test_src));
1307
1308 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr
1309 + offsetof(struct hifn_dma, test_src));
1310 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr
1311 + offsetof(struct hifn_dma, test_dst));
1312
1313 dma->cmdr[cmdi].l = htole32(16 | masks);
1314 dma->srcr[srci].l = htole32(8 | masks);
1315 dma->dstr[dsti].l = htole32(4 | masks);
1316 dma->resr[resi].l = htole32(4 | masks);
1317
1318 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1319 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1320
1321 for (r = 10000; r >= 0; r--) {
1322 DELAY(10);
1323 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1324 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1325 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1326 break;
1327 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1328 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1329 }
1330 if (r == 0) {
1331 device_printf(sc->sc_dev, "writeramaddr -- "
1332 "result[%d](addr %d) still valid\n", resi, addr);
1333 r = -1;
1334 return (-1);
1335 } else
1336 r = 0;
1337
1338 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1339 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1340 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1341
1342 return (r);
1343 }
1344
1345 static int
1346 hifn_readramaddr(struct hifn_softc *sc, int addr, u_int8_t *data)
1347 {
1348 struct hifn_dma *dma = sc->sc_dma;
1349 hifn_base_command_t rc;
1350 const u_int32_t masks = HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ;
1351 int r, cmdi, srci, dsti, resi;
1352
1353 rc.masks = htole16(2 << 13);
1354 rc.session_num = htole16(addr >> 14);
1355 rc.total_source_count = htole16(addr & 0x3fff);
1356 rc.total_dest_count = htole16(8);
1357
1358 hifn_alloc_slot(sc, &cmdi, &srci, &dsti, &resi);
1359
1360 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1361 HIFN_DMACSR_C_CTRL_ENA | HIFN_DMACSR_S_CTRL_ENA |
1362 HIFN_DMACSR_D_CTRL_ENA | HIFN_DMACSR_R_CTRL_ENA);
1363
1364 bzero(dma->command_bufs[cmdi], HIFN_MAX_COMMAND);
1365 *(hifn_base_command_t *)dma->command_bufs[cmdi] = rc;
1366
1367 dma->srcr[srci].p = htole32(sc->sc_dma_physaddr +
1368 offsetof(struct hifn_dma, test_src));
1369 dma->test_src = 0;
1370 dma->dstr[dsti].p = htole32(sc->sc_dma_physaddr +
1371 offsetof(struct hifn_dma, test_dst));
1372 dma->test_dst = 0;
1373 dma->cmdr[cmdi].l = htole32(8 | masks);
1374 dma->srcr[srci].l = htole32(8 | masks);
1375 dma->dstr[dsti].l = htole32(8 | masks);
1376 dma->resr[resi].l = htole32(HIFN_MAX_RESULT | masks);
1377
1378 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1379 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1380
1381 for (r = 10000; r >= 0; r--) {
1382 DELAY(10);
1383 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1384 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1385 if ((dma->resr[resi].l & htole32(HIFN_D_VALID)) == 0)
1386 break;
1387 bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
1388 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1389 }
1390 if (r == 0) {
1391 device_printf(sc->sc_dev, "readramaddr -- "
1392 "result[%d](addr %d) still valid\n", resi, addr);
1393 r = -1;
1394 } else {
1395 r = 0;
1396 bcopy(&dma->test_dst, data, sizeof(dma->test_dst));
1397 }
1398
1399 WRITE_REG_1(sc, HIFN_1_DMA_CSR,
1400 HIFN_DMACSR_C_CTRL_DIS | HIFN_DMACSR_S_CTRL_DIS |
1401 HIFN_DMACSR_D_CTRL_DIS | HIFN_DMACSR_R_CTRL_DIS);
1402
1403 return (r);
1404 }
1405
/*
 * Initialize the descriptor rings.
 *
 * Fills in the physical-address fields that never change after setup:
 * each command/result descriptor points at its fixed per-slot buffer,
 * and the extra trailing descriptor of every ring points back at the
 * ring head (used as a JUMP target when the producer index wraps).
 * Finally resets all producer ("i"), cleanup ("k") and in-use ("u")
 * counters to an empty state.
 */
static void
hifn_init_dma(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	int i;

	hifn_set_retry(sc);

	/* initialize static pointer values */
	for (i = 0; i < HIFN_D_CMD_RSIZE; i++)
		dma->cmdr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, command_bufs[i][0]));
	for (i = 0; i < HIFN_D_RES_RSIZE; i++)
		dma->resr[i].p = htole32(sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, result_bufs[i][0]));

	/* trailing descriptors loop each ring back to its start */
	dma->cmdr[HIFN_D_CMD_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, cmdr[0]));
	dma->srcr[HIFN_D_SRC_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, srcr[0]));
	dma->dstr[HIFN_D_DST_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, dstr[0]));
	dma->resr[HIFN_D_RES_RSIZE].p =
	    htole32(sc->sc_dma_physaddr + offsetof(struct hifn_dma, resr[0]));

	/* all rings start out empty */
	dma->cmdu = dma->srcu = dma->dstu = dma->resu = 0;
	dma->cmdi = dma->srci = dma->dsti = dma->resi = 0;
	dma->cmdk = dma->srck = dma->dstk = dma->resk = 0;
}
1438
/*
 * Writes out the raw command buffer space.  Returns the
 * command buffer size.
 *
 * The on-the-wire layout built here is, in order: the base command,
 * an optional MAC command, an optional crypt command, then optional
 * MAC key, crypt key and IV blobs depending on the NEW_KEY/NEW_IV
 * mask bits.  The caller must provide a buffer large enough for the
 * worst case (HIFN_MAX_COMMAND).
 */
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
#define	MIN(a,b)	((a)<(b)?(a):(b))
	u_int8_t *buf_pos;
	hifn_base_command_t *base_cmd;
	hifn_mac_command_t *mac_cmd;
	hifn_crypt_command_t *cry_cmd;
	int using_mac, using_crypt, len, ivlen;
	u_int32_t dlen, slen;

	buf_pos = buf;
	using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
	using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;

	/*
	 * Base command: source/destination byte counts are 16 bits wide
	 * with their high bits packed into the session_num field.  When
	 * there is slop, the destination count covers the rounded-up
	 * longword instead of the raw map size.
	 */
	base_cmd = (hifn_base_command_t *)buf_pos;
	base_cmd->masks = htole16(cmd->base_masks);
	slen = cmd->src_mapsize;
	if (cmd->sloplen)
		dlen = cmd->dst_mapsize - cmd->sloplen + sizeof(u_int32_t);
	else
		dlen = cmd->dst_mapsize;
	base_cmd->total_source_count = htole16(slen & HIFN_BASE_CMD_LENMASK_LO);
	base_cmd->total_dest_count = htole16(dlen & HIFN_BASE_CMD_LENMASK_LO);
	dlen >>= 16;
	slen >>= 16;
	base_cmd->session_num = htole16(
	    ((slen << HIFN_BASE_CMD_SRCLEN_S) & HIFN_BASE_CMD_SRCLEN_M) |
	    ((dlen << HIFN_BASE_CMD_DSTLEN_S) & HIFN_BASE_CMD_DSTLEN_M));
	buf_pos += sizeof(hifn_base_command_t);

	if (using_mac) {
		/* MAC command: length high bits ride in the masks field */
		mac_cmd = (hifn_mac_command_t *)buf_pos;
		dlen = cmd->maccrd->crd_len;
		mac_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		mac_cmd->masks = htole16(cmd->mac_masks |
		    ((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
		mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
		mac_cmd->reserved = 0;
		buf_pos += sizeof(hifn_mac_command_t);
	}

	if (using_crypt) {
		/* crypt command: same high-bit packing as the MAC command */
		cry_cmd = (hifn_crypt_command_t *)buf_pos;
		dlen = cmd->enccrd->crd_len;
		cry_cmd->source_count = htole16(dlen & 0xffff);
		dlen >>= 16;
		cry_cmd->masks = htole16(cmd->cry_masks |
		    ((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
		cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
		cry_cmd->reserved = 0;
		buf_pos += sizeof(hifn_crypt_command_t);
	}

	if (using_mac && cmd->mac_masks & HIFN_MAC_CMD_NEW_KEY) {
		bcopy(cmd->mac, buf_pos, HIFN_MAC_KEY_LENGTH);
		buf_pos += HIFN_MAC_KEY_LENGTH;
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_KEY) {
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_3DES:
			bcopy(cmd->ck, buf_pos, HIFN_3DES_KEY_LENGTH);
			buf_pos += HIFN_3DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_DES:
			bcopy(cmd->ck, buf_pos, HIFN_DES_KEY_LENGTH);
			buf_pos += HIFN_DES_KEY_LENGTH;
			break;
		case HIFN_CRYPT_CMD_ALG_RC4:
			/*
			 * RC4 wants a full 256-byte key area; repeat the
			 * caller's key material to fill it, then a 4-byte
			 * zero trailer.
			 */
			len = 256;
			do {
				int clen;

				clen = MIN(cmd->cklen, len);
				bcopy(cmd->ck, buf_pos, clen);
				len -= clen;
				buf_pos += clen;
			} while (len > 0);
			bzero(buf_pos, 4);
			buf_pos += 4;
			break;
		case HIFN_CRYPT_CMD_ALG_AES:
			/*
			 * AES keys are variable 128, 192 and
			 * 256 bits (16, 24 and 32 bytes).
			 */
			bcopy(cmd->ck, buf_pos, cmd->cklen);
			buf_pos += cmd->cklen;
			break;
		}
	}

	if (using_crypt && cmd->cry_masks & HIFN_CRYPT_CMD_NEW_IV) {
		/* AES uses a 16-byte IV, the DES-family ciphers 8 bytes */
		switch (cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) {
		case HIFN_CRYPT_CMD_ALG_AES:
			ivlen = HIFN_AES_IV_LENGTH;
			break;
		default:
			ivlen = HIFN_IV_LENGTH;
			break;
		}
		bcopy(cmd->iv, buf_pos, ivlen);
		buf_pos += ivlen;
	}

	/*
	 * Pure pass-through (no MAC, no crypt) still needs an 8-byte
	 * zero pad after the base command.
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_MAC|HIFN_BASE_CMD_CRYPT)) == 0) {
		bzero(buf_pos, 8);
		buf_pos += 8;
	}

	return (buf_pos - buf);
#undef	MIN
}
1558
1559 static int
1560 hifn_dmamap_aligned(struct hifn_operand *op)
1561 {
1562 int i;
1563
1564 for (i = 0; i < op->nsegs; i++) {
1565 if (op->segs[i].ds_addr & 3)
1566 return (0);
1567 if ((i != (op->nsegs - 1)) && (op->segs[i].ds_len & 3))
1568 return (0);
1569 }
1570 return (1);
1571 }
1572
/*
 * Fill the destination descriptor ring for a command.  All but the
 * final segment are queued verbatim.  The final data descriptor is
 * special-cased for "slop": when the total length is not a longword
 * multiple, the tail bytes are redirected into the shared slop[]
 * longword (copied back to the real buffer at completion time), and
 * the remainder of the last real segment -- if any -- is queued as a
 * separate descriptor before it.  HIFN_D_LAST goes only on the final
 * descriptor.  Ring wrap is handled by arming the trailing JUMP
 * descriptor.  Returns the new producer index and bumps dstu by the
 * number of descriptors consumed.
 */
static int
hifn_dmamap_load_dst(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *dst = &cmd->dst;
	u_int32_t p, l;
	int idx, used = 0, i;

	idx = dma->dsti;
	/* queue all but the last segment as plain data descriptors */
	for (i = 0; i < dst->nsegs - 1; i++) {
		dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
		dma->dstr[idx].l = htole32(HIFN_D_VALID |
		    HIFN_D_MASKDONEIRQ | dst->segs[i].ds_len);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		used++;

		if (++idx == HIFN_D_DST_RSIZE) {
			/* wrap: arm the jump descriptor */
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			idx = 0;
		}
	}

	if (cmd->sloplen == 0) {
		/* aligned: the last segment is the last descriptor */
		p = dst->segs[i].ds_addr;
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    dst->segs[i].ds_len;
	} else {
		/* unaligned tail: final descriptor targets the slop word */
		p = sc->sc_dma_physaddr +
		    offsetof(struct hifn_dma, slop[cmd->slopidx]);
		l = HIFN_D_VALID | HIFN_D_MASKDONEIRQ | HIFN_D_LAST |
		    sizeof(u_int32_t);

		/* queue whatever of the last real segment precedes the slop */
		if ((dst->segs[i].ds_len - cmd->sloplen) != 0) {
			dma->dstr[idx].p = htole32(dst->segs[i].ds_addr);
			dma->dstr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_MASKDONEIRQ |
			    (dst->segs[i].ds_len - cmd->sloplen));
			HIFN_DSTR_SYNC(sc, idx,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			used++;

			if (++idx == HIFN_D_DST_RSIZE) {
				dma->dstr[idx].l = htole32(HIFN_D_VALID |
				    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
				HIFN_DSTR_SYNC(sc, idx,
				    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
				idx = 0;
			}
		}
	}
	/* final (LAST) descriptor computed above */
	dma->dstr[idx].p = htole32(p);
	dma->dstr[idx].l = htole32(l);
	HIFN_DSTR_SYNC(sc, idx, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	used++;

	if (++idx == HIFN_D_DST_RSIZE) {
		dma->dstr[idx].l = htole32(HIFN_D_VALID | HIFN_D_JUMP |
		    HIFN_D_MASKDONEIRQ);
		HIFN_DSTR_SYNC(sc, idx,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		idx = 0;
	}

	dma->dsti = idx;
	dma->dstu += used;
	return (idx);
}
1644
/*
 * Fill the source descriptor ring with the command's source segments,
 * one descriptor per DMA segment, marking the final one HIFN_D_LAST.
 * Ring wrap is handled by arming the trailing JUMP descriptor.
 * Returns the new producer index and bumps srcu by the segment count.
 */
static int
hifn_dmamap_load_src(struct hifn_softc *sc, struct hifn_command *cmd)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_operand *src = &cmd->src;
	int idx, i;
	u_int32_t last = 0;

	idx = dma->srci;
	for (i = 0; i < src->nsegs; i++) {
		/* only the final segment carries the LAST flag */
		if (i == src->nsegs - 1)
			last = HIFN_D_LAST;

		dma->srcr[idx].p = htole32(src->segs[i].ds_addr);
		dma->srcr[idx].l = htole32(src->segs[i].ds_len |
		    HIFN_D_VALID | HIFN_D_MASKDONEIRQ | last);
		HIFN_SRCR_SYNC(sc, idx,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

		if (++idx == HIFN_D_SRC_RSIZE) {
			/* wrap: arm the jump descriptor and restart at 0 */
			dma->srcr[idx].l = htole32(HIFN_D_VALID |
			    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
			HIFN_SRCR_SYNC(sc, HIFN_D_SRC_RSIZE,
			    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
			idx = 0;
		}
	}
	dma->srci = idx;
	dma->srcu += src->nsegs;
	return (idx);
}
1676
1677 static void
1678 hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1679 {
1680 struct hifn_operand *op = arg;
1681
1682 KASSERT(nsegs <= MAX_SCATTER,
1683 ("hifn_op_cb: too many DMA segments (%u > %u) "
1684 "returned when mapping operand", nsegs, MAX_SCATTER));
1685 op->mapsize = mapsize;
1686 op->nsegs = nsegs;
1687 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1688 }
1689
/*
 * Submit one crypto operation to the device.
 *
 * Phases: (1) check command/result ring space; (2) DMA-map the source
 * (mbuf chain or uio); (3) if the source is not longword-aligned,
 * copy it into a freshly built mbuf chain to become the destination;
 * (4) map the destination; (5) write the command descriptor, source
 * descriptors, result descriptor and destination descriptors, kicking
 * each DMA engine on first use.  Returns 0 on success, ERESTART when
 * the rings are full (caller requeues), or an errno on hard failure;
 * on error all maps/mbufs acquired so far are released via the goto
 * cleanup chain at the bottom.
 */
static int
hifn_crypto(
	struct hifn_softc *sc,
	struct hifn_command *cmd,
	struct cryptop *crp,
	int hint)
{
	struct hifn_dma *dma = sc->sc_dma;
	u_int32_t cmdlen;
	int cmdi, resi, err = 0;

	/*
	 * need 1 cmd, and 1 res
	 *
	 * NB: check this first since it's easy.
	 */
	if ((dma->cmdu + 1) > HIFN_D_CMD_RSIZE ||
	    (dma->resu + 1) > HIFN_D_RES_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"cmd/result exhaustion, cmdu %u resu %u\n",
				dma->cmdu, dma->resu);
		}
#endif
		hifnstats.hst_nomem_cr++;
		return (ERESTART);
	}

	if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->src_map)) {
		hifnstats.hst_nomem_map++;
		return (ENOMEM);
	}

	/* map the source: mbuf chain or uio, nothing else accepted */
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
		    cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else if (crp->crp_flags & CRYPTO_F_IOV) {
		if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
		    cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
			hifnstats.hst_nomem_load++;
			err = ENOMEM;
			goto err_srcmap1;
		}
	} else {
		err = EINVAL;
		goto err_srcmap1;
	}

	if (hifn_dmamap_aligned(&cmd->src)) {
		/* in-place operation; remember any sub-longword tail */
		cmd->sloplen = cmd->src_mapsize & 3;
		cmd->dst = cmd->src;
	} else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			/* cannot realign a uio; reject */
			err = EINVAL;
			goto err_srcmap;
		} else if (crp->crp_flags & CRYPTO_F_IMBUF) {
			int totlen, len;
			struct mbuf *m, *m0, *mlast;

			KASSERT(cmd->dst_m == cmd->src_m,
				("hifn_crypto: dst_m initialized improperly"));
			hifnstats.hst_unaligned++;
			/*
			 * Source is not aligned on a longword boundary.
			 * Copy the data to insure alignment.  If we fail
			 * to allocate mbufs or clusters while doing this
			 * we return ERESTART so the operation is requeued
			 * at the crypto later, but only if there are
			 * ops already posted to the hardware; otherwise we
			 * have no guarantee that we'll be re-entered.
			 */
			totlen = cmd->src_mapsize;
			if (cmd->src_m->m_flags & M_PKTHDR) {
				len = MHLEN;
				MGETHDR(m0, M_DONTWAIT, MT_DATA);
				if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_DONTWAIT)) {
					m_free(m0);
					m0 = NULL;
				}
			} else {
				len = MLEN;
				MGET(m0, M_DONTWAIT, MT_DATA);
			}
			if (m0 == NULL) {
				hifnstats.hst_nomem_mbuf++;
				err = dma->cmdu ? ERESTART : ENOMEM;
				goto err_srcmap;
			}
			if (totlen >= MINCLSIZE) {
				MCLGET(m0, M_DONTWAIT);
				if ((m0->m_flags & M_EXT) == 0) {
					hifnstats.hst_nomem_mcl++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MCLBYTES;
			}
			totlen -= len;
			m0->m_pkthdr.len = m0->m_len = len;
			mlast = m0;

			/* chain further mbufs/clusters until totlen covered */
			while (totlen > 0) {
				MGET(m, M_DONTWAIT, MT_DATA);
				if (m == NULL) {
					hifnstats.hst_nomem_mbuf++;
					err = dma->cmdu ? ERESTART : ENOMEM;
					m_freem(m0);
					goto err_srcmap;
				}
				len = MLEN;
				if (totlen >= MINCLSIZE) {
					MCLGET(m, M_DONTWAIT);
					if ((m->m_flags & M_EXT) == 0) {
						hifnstats.hst_nomem_mcl++;
						err = dma->cmdu ? ERESTART : ENOMEM;
						mlast->m_next = m;
						m_freem(m0);
						goto err_srcmap;
					}
					len = MCLBYTES;
				}

				m->m_len = len;
				m0->m_pkthdr.len += len;
				totlen -= len;

				mlast->m_next = m;
				mlast = m;
			}
			cmd->dst_m = m0;
		}
	}

	/* map the (possibly new) destination */
	if (cmd->dst_map == NULL) {
		if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
			hifnstats.hst_nomem_map++;
			err = ENOMEM;
			goto err_srcmap;
		}
		if (crp->crp_flags & CRYPTO_F_IMBUF) {
			if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_map++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		} else if (crp->crp_flags & CRYPTO_F_IOV) {
			if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
			    cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
				hifnstats.hst_nomem_load++;
				err = ENOMEM;
				goto err_dstmap1;
			}
		}
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "Entering cmd: stat %8x ien %8x u %d/%d/%d/%d n %d/%d\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER),
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu,
		    cmd->src_nsegs, cmd->dst_nsegs);
	}
#endif

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE|BUS_DMASYNC_PREREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_PREWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_PREREAD);
	}

	/*
	 * need N src, and N dst
	 */
	if ((dma->srcu + cmd->src_nsegs) > HIFN_D_SRC_RSIZE ||
	    (dma->dstu + cmd->dst_nsegs + 1) > HIFN_D_DST_RSIZE) {
#ifdef HIFN_DEBUG
		if (hifn_debug) {
			device_printf(sc->sc_dev,
				"src/dst exhaustion, srcu %u+%u dstu %u+%u\n",
				dma->srcu, cmd->src_nsegs,
				dma->dstu, cmd->dst_nsegs);
		}
#endif
		hifnstats.hst_nomem_sd++;
		err = ERESTART;
		goto err_dstmap;
	}

	/* claim a command slot, wrapping via the JUMP descriptor if needed */
	if (dma->cmdi == HIFN_D_CMD_RSIZE) {
		dma->cmdi = 0;
		dma->cmdr[HIFN_D_CMD_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_CMDR_SYNC(sc, HIFN_D_CMD_RSIZE,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	cmdi = dma->cmdi++;
	cmdlen = hifn_write_command(cmd, dma->command_bufs[cmdi]);
	HIFN_CMD_SYNC(sc, cmdi, BUS_DMASYNC_PREWRITE);

	/* .p for command/result already set */
	dma->cmdr[cmdi].l = htole32(cmdlen | HIFN_D_VALID | HIFN_D_LAST |
	    HIFN_D_MASKDONEIRQ);
	HIFN_CMDR_SYNC(sc, cmdi,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	dma->cmdu++;
	if (sc->sc_c_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_C_CTRL_ENA);
		sc->sc_c_busy = 1;
	}

	/*
	 * We don't worry about missing an interrupt (which a "command wait"
	 * interrupt salvages us from), unless there is more than one command
	 * in the queue.
	 */
	if (dma->cmdu > 1) {
		sc->sc_dmaier |= HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	hifnstats.hst_ipackets++;
	hifnstats.hst_ibytes += cmd->src_mapsize;

	hifn_dmamap_load_src(sc, cmd);
	if (sc->sc_s_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_S_CTRL_ENA);
		sc->sc_s_busy = 1;
	}

	/*
	 * Unlike other descriptors, we don't mask done interrupt from
	 * result descriptor.
	 */
#ifdef HIFN_DEBUG
	if (hifn_debug)
		printf("load res\n");
#endif
	if (dma->resi == HIFN_D_RES_RSIZE) {
		dma->resi = 0;
		dma->resr[HIFN_D_RES_RSIZE].l = htole32(HIFN_D_VALID |
		    HIFN_D_JUMP | HIFN_D_MASKDONEIRQ);
		HIFN_RESR_SYNC(sc, HIFN_D_RES_RSIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}
	resi = dma->resi++;
	KASSERT(dma->hifn_commands[resi] == NULL,
		("hifn_crypto: command slot %u busy", resi));
	dma->hifn_commands[resi] = cmd;
	HIFN_RES_SYNC(sc, resi, BUS_DMASYNC_PREREAD);
	/* batch result interrupts when the caller hints more work follows */
	if ((hint & CRYPTO_HINT_MORE) && sc->sc_curbatch < hifn_maxbatch) {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST | HIFN_D_MASKDONEIRQ);
		sc->sc_curbatch++;
		if (sc->sc_curbatch > hifnstats.hst_maxbatch)
			hifnstats.hst_maxbatch = sc->sc_curbatch;
		hifnstats.hst_totbatch++;
	} else {
		dma->resr[resi].l = htole32(HIFN_MAX_RESULT |
		    HIFN_D_VALID | HIFN_D_LAST);
		sc->sc_curbatch = 0;
	}
	HIFN_RESR_SYNC(sc, resi,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	dma->resu++;
	if (sc->sc_r_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_R_CTRL_ENA);
		sc->sc_r_busy = 1;
	}

	if (cmd->sloplen)
		cmd->slopidx = resi;

	hifn_dmamap_load_dst(sc, cmd);

	if (sc->sc_d_busy == 0) {
		WRITE_REG_1(sc, HIFN_1_DMA_CSR, HIFN_DMACSR_D_CTRL_ENA);
		sc->sc_d_busy = 1;
	}

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev, "command: stat %8x ier %8x\n",
		    READ_REG_1(sc, HIFN_1_DMA_CSR),
		    READ_REG_1(sc, HIFN_1_DMA_IER));
	}
#endif

	sc->sc_active = 5;
	KASSERT(err == 0, ("hifn_crypto: success with error %u", err));
	return (err);		/* success */

	/* error unwinding: release resources in reverse acquisition order */
err_dstmap:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
err_dstmap1:
	if (cmd->src_map != cmd->dst_map)
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m)
			m_freem(cmd->dst_m);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
err_srcmap1:
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	return (err);
}
2010
2011 static void
2012 hifn_tick(void* vsc)
2013 {
2014 struct hifn_softc *sc = vsc;
2015 int s;
2016
2017 s = splimp();
2018 if (sc->sc_active == 0) {
2019 struct hifn_dma *dma = sc->sc_dma;
2020 u_int32_t r = 0;
2021
2022 if (dma->cmdu == 0 && sc->sc_c_busy) {
2023 sc->sc_c_busy = 0;
2024 r |= HIFN_DMACSR_C_CTRL_DIS;
2025 }
2026 if (dma->srcu == 0 && sc->sc_s_busy) {
2027 sc->sc_s_busy = 0;
2028 r |= HIFN_DMACSR_S_CTRL_DIS;
2029 }
2030 if (dma->dstu == 0 && sc->sc_d_busy) {
2031 sc->sc_d_busy = 0;
2032 r |= HIFN_DMACSR_D_CTRL_DIS;
2033 }
2034 if (dma->resu == 0 && sc->sc_r_busy) {
2035 sc->sc_r_busy = 0;
2036 r |= HIFN_DMACSR_R_CTRL_DIS;
2037 }
2038 if (r)
2039 WRITE_REG_1(sc, HIFN_1_DMA_CSR, r);
2040 } else
2041 sc->sc_active--;
2042 splx(s);
2043 callout_reset(&sc->sc_tickto, hz, hifn_tick, sc);
2044 }
2045
/*
 * Interrupt service routine.
 *
 * Acknowledges the DMA status bits we have enabled, handles the PK-engine
 * done bit, reports overruns/illegal accesses, resets the part on a DMA
 * abort, then reaps completed descriptors from the result, source and
 * command rings, invoking hifn_callback() for each finished request.
 * Finally, if the crypto framework was blocked waiting for resources,
 * it is unblocked.
 */
static void
hifn_intr(void *arg)
{
	struct hifn_softc *sc = arg;
	struct hifn_dma *dma;
	u_int32_t dmacsr, restart;
	int i, u;		/* ring index / in-use count being reaped */

	dma = sc->sc_dma;

	dmacsr = READ_REG_1(sc, HIFN_1_DMA_CSR);

#ifdef HIFN_DEBUG
	if (hifn_debug) {
		device_printf(sc->sc_dev,
		    "irq: stat %08x ien %08x damier %08x i %d/%d/%d/%d k %d/%d/%d/%d u %d/%d/%d/%d\n",
		    dmacsr, READ_REG_1(sc, HIFN_1_DMA_IER), sc->sc_dmaier,
		    dma->cmdi, dma->srci, dma->dsti, dma->resi,
		    dma->cmdk, dma->srck, dma->dstk, dma->resk,
		    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
	}
#endif

	/* Ack only the interrupt sources we have enabled. */
	WRITE_REG_1(sc, HIFN_1_DMA_CSR, dmacsr & sc->sc_dmaier);

	/* Public-key engine completion: write the DONE bit back to clear. */
	if ((sc->sc_flags & HIFN_HAS_PUBLIC) &&
	    (dmacsr & HIFN_DMACSR_PUBDONE))
		WRITE_REG_1(sc, HIFN_1_PUB_STATUS,
		    READ_REG_1(sc, HIFN_1_PUB_STATUS) | HIFN_PUBSTS_DONE);

	restart = dmacsr & (HIFN_DMACSR_D_OVER | HIFN_DMACSR_R_OVER);
	if (restart)
		device_printf(sc->sc_dev, "overrun %x\n", dmacsr);

	if (sc->sc_flags & HIFN_IS_7811) {
		if (dmacsr & HIFN_DMACSR_ILLR)
			device_printf(sc->sc_dev, "illegal read\n");
		if (dmacsr & HIFN_DMACSR_ILLW)
			device_printf(sc->sc_dev, "illegal write\n");
	}

	/* A DMA abort on any ring requires a full reset of the part. */
	restart = dmacsr & (HIFN_DMACSR_C_ABORT | HIFN_DMACSR_S_ABORT |
	    HIFN_DMACSR_D_ABORT | HIFN_DMACSR_R_ABORT);
	if (restart) {
		device_printf(sc->sc_dev, "abort, resetting.\n");
		hifnstats.hst_abort++;
		hifn_abort(sc);
		return;
	}

	if ((dmacsr & HIFN_DMACSR_C_WAIT) && (dma->cmdu == 0)) {
		/*
		 * If no slots to process and we receive a "waiting on
		 * command" interrupt, we disable the "waiting on command"
		 * (by clearing it).
		 */
		sc->sc_dmaier &= ~HIFN_DMAIER_C_WAIT;
		WRITE_REG_1(sc, HIFN_1_DMA_IER, sc->sc_dmaier);
	}

	/* clear the rings */
	i = dma->resk; u = dma->resu;
	while (u != 0) {
		HIFN_RESR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* A still-VALID descriptor means the chip owns it: stop. */
		if (dma->resr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_RESR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}

		/*
		 * Slot HIFN_D_RES_RSIZE carries no command (it appears to
		 * be the ring's wrap/jump descriptor — the ring is indexed
		 * modulo RSIZE+1 below); skip the completion work for it.
		 */
		if (i != HIFN_D_RES_RSIZE) {
			struct hifn_command *cmd;
			u_int8_t *macbuf = NULL;

			HIFN_RES_SYNC(sc, i, BUS_DMASYNC_POSTREAD);
			cmd = dma->hifn_commands[i];
			KASSERT(cmd != NULL,
			    ("hifn_intr: null command slot %u", i));
			dma->hifn_commands[i] = NULL;

			/* MAC result lives 12 bytes into the result buffer. */
			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			}

			/* hifn_callback() frees cmd and completes crp. */
			hifn_callback(sc, cmd, macbuf);
			hifnstats.hst_opackets++;
			u--;
		}

		if (++i == (HIFN_D_RES_RSIZE + 1))
			i = 0;
	}
	dma->resk = i; dma->resu = u;

	/* Reap completed source descriptors. */
	i = dma->srck; u = dma->srcu;
	while (u != 0) {
		if (i == HIFN_D_SRC_RSIZE)
			i = 0;
		HIFN_SRCR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->srcr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_SRCR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->srck = i; dma->srcu = u;

	/* Reap completed command descriptors (again skipping slot RSIZE). */
	i = dma->cmdk; u = dma->cmdu;
	while (u != 0) {
		HIFN_CMDR_SYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		if (dma->cmdr[i].l & htole32(HIFN_D_VALID)) {
			HIFN_CMDR_SYNC(sc, i,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		if (i != HIFN_D_CMD_RSIZE) {
			u--;
			HIFN_CMD_SYNC(sc, i, BUS_DMASYNC_POSTWRITE);
		}
		if (++i == (HIFN_D_CMD_RSIZE + 1))
			i = 0;
	}
	dma->cmdk = i; dma->cmdu = u;

	if (sc->sc_needwakeup) {		/* XXX check high watermark */
		int wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
#ifdef HIFN_DEBUG
		if (hifn_debug)
			device_printf(sc->sc_dev,
			    "wakeup crypto (%x) u %d/%d/%d/%d\n",
			    sc->sc_needwakeup,
			    dma->cmdu, dma->srcu, dma->dstu, dma->resu);
#endif
		/* Let queued requests flow into hifn_process() again. */
		sc->sc_needwakeup &= ~wakeup;
		crypto_unblock(sc->sc_cid, wakeup);
	}
}
2188
2189 /*
2190 * Allocate a new 'session' and return an encoded session id. 'sidp'
2191 * contains our registration id, and should contain an encoded session
2192 * id on successful allocation.
2193 */
2194 static int
2195 hifn_newsession(void *arg, u_int32_t *sidp, struct cryptoini *cri)
2196 {
2197 struct cryptoini *c;
2198 struct hifn_softc *sc = arg;
2199 int mac = 0, cry = 0, sesn;
2200 struct hifn_session *ses = NULL;
2201
2202 KASSERT(sc != NULL, ("hifn_newsession: null softc"));
2203 if (sidp == NULL || cri == NULL || sc == NULL)
2204 return (EINVAL);
2205
2206 if (sc->sc_sessions == NULL) {
2207 ses = sc->sc_sessions = (struct hifn_session *)malloc(
2208 sizeof(*ses), M_DEVBUF, M_NOWAIT);
2209 if (ses == NULL)
2210 return (ENOMEM);
2211 sesn = 0;
2212 sc->sc_nsessions = 1;
2213 } else {
2214 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
2215 if (!sc->sc_sessions[sesn].hs_used) {
2216 ses = &sc->sc_sessions[sesn];
2217 break;
2218 }
2219 }
2220
2221 if (ses == NULL) {
2222 sesn = sc->sc_nsessions;
2223 ses = (struct hifn_session *)malloc((sesn + 1) *
2224 sizeof(*ses), M_DEVBUF, M_NOWAIT);
2225 if (ses == NULL)
2226 return (ENOMEM);
2227 bcopy(sc->sc_sessions, ses, sesn * sizeof(*ses));
2228 bzero(sc->sc_sessions, sesn * sizeof(*ses));
2229 free(sc->sc_sessions, M_DEVBUF);
2230 sc->sc_sessions = ses;
2231 ses = &sc->sc_sessions[sesn];
2232 sc->sc_nsessions++;
2233 }
2234 }
2235 bzero(ses, sizeof(*ses));
2236 ses->hs_used = 1;
2237
2238 for (c = cri; c != NULL; c = c->cri_next) {
2239 switch (c->cri_alg) {
2240 case CRYPTO_MD5:
2241 case CRYPTO_SHA1:
2242 case CRYPTO_MD5_HMAC:
2243 case CRYPTO_SHA1_HMAC:
2244 if (mac)
2245 return (EINVAL);
2246 mac = 1;
2247 break;
2248 case CRYPTO_DES_CBC:
2249 case CRYPTO_3DES_CBC:
2250 case CRYPTO_AES_CBC:
2251 /* XXX this may read fewer, does it matter? */
2252 read_random(ses->hs_iv,
2253 c->cri_alg == CRYPTO_AES_CBC ?
2254 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2255 /*FALLTHROUGH*/
2256 case CRYPTO_ARC4:
2257 if (cry)
2258 return (EINVAL);
2259 cry = 1;
2260 break;
2261 default:
2262 return (EINVAL);
2263 }
2264 }
2265 if (mac == 0 && cry == 0)
2266 return (EINVAL);
2267
2268 *sidp = HIFN_SID(device_get_unit(sc->sc_dev), sesn);
2269
2270 return (0);
2271 }
2272
2273 /*
2274 * Deallocate a session.
2275 * XXX this routine should run a zero'd mac/encrypt key into context ram.
2276 * XXX to blow away any keys already stored there.
2277 */
2278 static int
2279 hifn_freesession(void *arg, u_int64_t tid)
2280 {
2281 struct hifn_softc *sc = arg;
2282 int session;
2283 u_int32_t sid = CRYPTO_SESID2LID(tid);
2284
2285 KASSERT(sc != NULL, ("hifn_freesession: null softc"));
2286 if (sc == NULL)
2287 return (EINVAL);
2288
2289 session = HIFN_SESSION(sid);
2290 if (session >= sc->sc_nsessions)
2291 return (EINVAL);
2292
2293 bzero(&sc->sc_sessions[session], sizeof(sc->sc_sessions[session]));
2294 return (0);
2295 }
2296
2297 static int
2298 hifn_process(void *arg, struct cryptop *crp, int hint)
2299 {
2300 struct hifn_softc *sc = arg;
2301 struct hifn_command *cmd = NULL;
2302 int session, err, ivlen;
2303 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
2304
2305 if (crp == NULL || crp->crp_callback == NULL) {
2306 hifnstats.hst_invalid++;
2307 return (EINVAL);
2308 }
2309 session = HIFN_SESSION(crp->crp_sid);
2310
2311 if (sc == NULL || session >= sc->sc_nsessions) {
2312 err = EINVAL;
2313 goto errout;
2314 }
2315
2316 cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
2317 if (cmd == NULL) {
2318 hifnstats.hst_nomem++;
2319 err = ENOMEM;
2320 goto errout;
2321 }
2322
2323 if (crp->crp_flags & CRYPTO_F_IMBUF) {
2324 cmd->src_m = (struct mbuf *)crp->crp_buf;
2325 cmd->dst_m = (struct mbuf *)crp->crp_buf;
2326 } else if (crp->crp_flags & CRYPTO_F_IOV) {
2327 cmd->src_io = (struct uio *)crp->crp_buf;
2328 cmd->dst_io = (struct uio *)crp->crp_buf;
2329 } else {
2330 err = EINVAL;
2331 goto errout; /* XXX we don't handle contiguous buffers! */
2332 }
2333
2334 crd1 = crp->crp_desc;
2335 if (crd1 == NULL) {
2336 err = EINVAL;
2337 goto errout;
2338 }
2339 crd2 = crd1->crd_next;
2340
2341 if (crd2 == NULL) {
2342 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
2343 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2344 crd1->crd_alg == CRYPTO_SHA1 ||
2345 crd1->crd_alg == CRYPTO_MD5) {
2346 maccrd = crd1;
2347 enccrd = NULL;
2348 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
2349 crd1->crd_alg == CRYPTO_3DES_CBC ||
2350 crd1->crd_alg == CRYPTO_AES_CBC ||
2351 crd1->crd_alg == CRYPTO_ARC4) {
2352 if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
2353 cmd->base_masks |= HIFN_BASE_CMD_DECODE;
2354 maccrd = NULL;
2355 enccrd = crd1;
2356 } else {
2357 err = EINVAL;
2358 goto errout;
2359 }
2360 } else {
2361 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
2362 crd1->crd_alg == CRYPTO_SHA1_HMAC ||
2363 crd1->crd_alg == CRYPTO_MD5 ||
2364 crd1->crd_alg == CRYPTO_SHA1) &&
2365 (crd2->crd_alg == CRYPTO_DES_CBC ||
2366 crd2->crd_alg == CRYPTO_3DES_CBC ||
2367 crd2->crd_alg == CRYPTO_AES_CBC ||
2368 crd2->crd_alg == CRYPTO_ARC4) &&
2369 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
2370 cmd->base_masks = HIFN_BASE_CMD_DECODE;
2371 maccrd = crd1;
2372 enccrd = crd2;
2373 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
2374 crd1->crd_alg == CRYPTO_ARC4 ||
2375 crd1->crd_alg == CRYPTO_3DES_CBC ||
2376 crd1->crd_alg == CRYPTO_AES_CBC) &&
2377 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
2378 crd2->crd_alg == CRYPTO_SHA1_HMAC ||
2379 crd2->crd_alg == CRYPTO_MD5 ||
2380 crd2->crd_alg == CRYPTO_SHA1) &&
2381 (crd1->crd_flags & CRD_F_ENCRYPT)) {
2382 enccrd = crd1;
2383 maccrd = crd2;
2384 } else {
2385 /*
2386 * We cannot order the 7751 as requested
2387 */
2388 err = EINVAL;
2389 goto errout;
2390 }
2391 }
2392
2393 if (enccrd) {
2394 cmd->enccrd = enccrd;
2395 cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
2396 switch (enccrd->crd_alg) {
2397 case CRYPTO_ARC4:
2398 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
2399 break;
2400 case CRYPTO_DES_CBC:
2401 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_DES |
2402 HIFN_CRYPT_CMD_MODE_CBC |
2403 HIFN_CRYPT_CMD_NEW_IV;
2404 break;
2405 case CRYPTO_3DES_CBC:
2406 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_3DES |
2407 HIFN_CRYPT_CMD_MODE_CBC |
2408 HIFN_CRYPT_CMD_NEW_IV;
2409 break;
2410 case CRYPTO_AES_CBC:
2411 cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_AES |
2412 HIFN_CRYPT_CMD_MODE_CBC |
2413 HIFN_CRYPT_CMD_NEW_IV;
2414 break;
2415 default:
2416 err = EINVAL;
2417 goto errout;
2418 }
2419 if (enccrd->crd_alg != CRYPTO_ARC4) {
2420 ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
2421 HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
2422 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
2423 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2424 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2425 else
2426 bcopy(sc->sc_sessions[session].hs_iv,
2427 cmd->iv, ivlen);
2428
2429 if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
2430 == 0) {
2431 if (crp->crp_flags & CRYPTO_F_IMBUF)
2432 m_copyback(cmd->src_m,
2433 enccrd->crd_inject,
2434 ivlen, cmd->iv);
2435 else if (crp->crp_flags & CRYPTO_F_IOV)
2436 cuio_copyback(cmd->src_io,
2437 enccrd->crd_inject,
2438 ivlen, cmd->iv);
2439 }
2440 } else {
2441 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
2442 bcopy(enccrd->crd_iv, cmd->iv, ivlen);
2443 else if (crp->crp_flags & CRYPTO_F_IMBUF)
2444 m_copydata(cmd->src_m,
2445 enccrd->crd_inject, ivlen, cmd->iv);
2446 else if (crp->crp_flags & CRYPTO_F_IOV)
2447 cuio_copydata(cmd->src_io,
2448 enccrd->crd_inject, ivlen, cmd->iv);
2449 }
2450 }
2451
2452 cmd->ck = enccrd->crd_key;
2453 cmd->cklen = enccrd->crd_klen >> 3;
2454 cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
2455
2456 /*
2457 * Need to specify the size for the AES key in the masks.
2458 */
2459 if ((cmd->cry_masks & HIFN_CRYPT_CMD_ALG_MASK) ==
2460 HIFN_CRYPT_CMD_ALG_AES) {
2461 switch (cmd->cklen) {
2462 case 16:
2463 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_128;
2464 break;
2465 case 24:
2466 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_192;
2467 break;
2468 case 32:
2469 cmd->cry_masks |= HIFN_CRYPT_CMD_KSZ_256;
2470 break;
2471 default:
2472 err = EINVAL;
2473 goto errout;
2474 }
2475 }
2476 }
2477
2478 if (maccrd) {
2479 cmd->maccrd = maccrd;
2480 cmd->base_masks |= HIFN_BASE_CMD_MAC;
2481
2482 switch (maccrd->crd_alg) {
2483 case CRYPTO_MD5:
2484 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2485 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2486 HIFN_MAC_CMD_POS_IPSEC;
2487 break;
2488 case CRYPTO_MD5_HMAC:
2489 cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
2490 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2491 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2492 break;
2493 case CRYPTO_SHA1:
2494 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2495 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
2496 HIFN_MAC_CMD_POS_IPSEC;
2497 break;
2498 case CRYPTO_SHA1_HMAC:
2499 cmd->mac_masks |= HIFN_MAC_CMD_ALG_SHA1 |
2500 HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HMAC |
2501 HIFN_MAC_CMD_POS_IPSEC | HIFN_MAC_CMD_TRUNC;
2502 break;
2503 }
2504
2505 if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
2506 maccrd->crd_alg == CRYPTO_MD5_HMAC) {
2507 cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
2508 bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
2509 bzero(cmd->mac + (maccrd->crd_klen >> 3),
2510 HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
2511 }
2512 }
2513
2514 cmd->crp = crp;
2515 cmd->session_num = session;
2516 cmd->softc = sc;
2517
2518 err = hifn_crypto(sc, cmd, crp, hint);
2519 if (!err) {
2520 return 0;
2521 } else if (err == ERESTART) {
2522 /*
2523 * There weren't enough resources to dispatch the request
2524 * to the part. Notify the caller so they'll requeue this
2525 * request and resubmit it again soon.
2526 */
2527 #ifdef HIFN_DEBUG
2528 if (hifn_debug)
2529 device_printf(sc->sc_dev, "requeue request\n");
2530 #endif
2531 free(cmd, M_DEVBUF);
2532 sc->sc_needwakeup |= CRYPTO_SYMQ;
2533 return (err);
2534 }
2535
2536 errout:
2537 if (cmd != NULL)
2538 free(cmd, M_DEVBUF);
2539 if (err == EINVAL)
2540 hifnstats.hst_invalid++;
2541 else
2542 hifnstats.hst_nomem++;
2543 crp->crp_etype = err;
2544 crypto_done(crp);
2545 return (err);
2546 }
2547
/*
 * Recover from a DMA abort: walk the outstanding result-ring entries,
 * completing requests the chip already finished and failing the rest
 * with ENOMEM, then reset the board, its descriptor rings and PCI
 * registers so processing can resume.
 */
static void
hifn_abort(struct hifn_softc *sc)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct hifn_command *cmd;
	struct cryptop *crp;
	int i, u;	/* result-ring read index / outstanding count */

	i = dma->resk; u = dma->resu;
	while (u != 0) {
		cmd = dma->hifn_commands[i];
		KASSERT(cmd != NULL, ("hifn_abort: null command slot %u", i));
		dma->hifn_commands[i] = NULL;
		crp = cmd->crp;

		if ((dma->resr[i].l & htole32(HIFN_D_VALID)) == 0) {
			/* Salvage what we can. */
			u_int8_t *macbuf;

			/* MAC result is 12 bytes into the result buffer. */
			if (cmd->base_masks & HIFN_BASE_CMD_MAC) {
				macbuf = dma->result_bufs[i];
				macbuf += 12;
			} else
				macbuf = NULL;
			hifnstats.hst_opackets++;
			/* hifn_callback() frees cmd and calls crypto_done(). */
			hifn_callback(sc, cmd, macbuf);
		} else {
			/*
			 * Chip still owned the descriptor: the operation
			 * never completed.  Sync and tear down the DMA
			 * maps, release buffers and fail the request.
			 */
			if (cmd->src_map == cmd->dst_map) {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
			} else {
				bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
				    BUS_DMASYNC_POSTREAD);
			}

			/* Separate dst mbuf chain: give it to the caller. */
			if (cmd->src_m != cmd->dst_m) {
				m_freem(cmd->src_m);
				crp->crp_buf = (caddr_t)cmd->dst_m;
			}

			/* non-shared buffers cannot be restarted */
			if (cmd->src_map != cmd->dst_map) {
				/*
				 * XXX should be EAGAIN, delayed until
				 * after the reset.
				 */
				crp->crp_etype = ENOMEM;
				bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
				bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
			} else
				crp->crp_etype = ENOMEM;

			bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
			bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);

			free(cmd, M_DEVBUF);
			if (crp->crp_etype != EAGAIN)
				crypto_done(crp);
		}

		if (++i == HIFN_D_RES_RSIZE)
			i = 0;
		u--;
	}
	dma->resk = i; dma->resu = u;

	/* Full reinitialization of the part. */
	hifn_reset_board(sc, 1);
	hifn_init_dma(sc);
	hifn_init_pci_registers(sc);
}
2620
/*
 * Completion handler for a finished command.
 *
 * Syncs and tears down the DMA maps, fixes up the destination mbuf
 * chain, copies back any "slop" bytes and the MAC result, saves the
 * trailing cipher block as the next session IV for encrypt operations,
 * reaps the destination ring, then frees cmd and completes the request
 * with crypto_done().  'macbuf' points at the MAC result (or is NULL
 * when no MAC was requested).
 */
static void
hifn_callback(struct hifn_softc *sc, struct hifn_command *cmd, u_int8_t *macbuf)
{
	struct hifn_dma *dma = sc->sc_dma;
	struct cryptop *crp = cmd->crp;
	struct cryptodesc *crd;
	struct mbuf *m;
	int totlen, i, u, ivlen;

	if (cmd->src_map == cmd->dst_map) {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);
	} else {
		bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_sync(sc->sc_dmat, cmd->dst_map,
		    BUS_DMASYNC_POSTREAD);
	}

	if (crp->crp_flags & CRYPTO_F_IMBUF) {
		if (cmd->src_m != cmd->dst_m) {
			/*
			 * A separate destination chain was allocated:
			 * trim it to the source length, hand it to the
			 * caller and release the source chain.
			 */
			crp->crp_buf = (caddr_t)cmd->dst_m;
			totlen = cmd->src_mapsize;
			for (m = cmd->dst_m; m != NULL; m = m->m_next) {
				if (totlen < m->m_len) {
					m->m_len = totlen;
					totlen = 0;
				} else
					totlen -= m->m_len;
			}
			cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
			m_freem(cmd->src_m);
		}
	}

	/*
	 * Trailing bytes that did not fit a DMA alignment unit were
	 * staged in dma->slop; copy them back into the caller's buffer.
	 */
	if (cmd->sloplen != 0) {
		if (crp->crp_flags & CRYPTO_F_IMBUF)
			m_copyback((struct mbuf *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
		else if (crp->crp_flags & CRYPTO_F_IOV)
			cuio_copyback((struct uio *)crp->crp_buf,
			    cmd->src_mapsize - cmd->sloplen,
			    cmd->sloplen, (caddr_t)&dma->slop[cmd->slopidx]);
	}

	/* Reap completed destination-ring descriptors. */
	i = dma->dstk; u = dma->dstu;
	while (u != 0) {
		if (i == HIFN_D_DST_RSIZE)
			i = 0;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
		/* Still owned by the chip: stop here. */
		if (dma->dstr[i].l & htole32(HIFN_D_VALID)) {
			bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
			break;
		}
		i++, u--;
	}
	dma->dstk = i; dma->dstu = u;

	hifnstats.hst_obytes += cmd->dst_mapsize;

	/*
	 * For CBC encryption, save the last cipher block as the
	 * session IV for the next request (CBC chaining across calls).
	 */
	if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
	    HIFN_BASE_CMD_CRYPT) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			if (crd->crd_alg != CRYPTO_DES_CBC &&
			    crd->crd_alg != CRYPTO_3DES_CBC &&
			    crd->crd_alg != CRYPTO_AES_CBC)
				continue;
			ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
			    HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copydata((struct mbuf *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			else if (crp->crp_flags & CRYPTO_F_IOV) {
				cuio_copydata((struct uio *)crp->crp_buf,
				    crd->crd_skip + crd->crd_len - ivlen, ivlen,
				    cmd->softc->sc_sessions[cmd->session_num].hs_iv);
			}
			break;
		}
	}

	/* Deliver the MAC result to the place the descriptor asks for. */
	if (macbuf != NULL) {
		for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
			int len;

			if (crd->crd_alg == CRYPTO_MD5)
				len = 16;
			else if (crd->crd_alg == CRYPTO_SHA1)
				len = 20;
			else if (crd->crd_alg == CRYPTO_MD5_HMAC ||
			    crd->crd_alg == CRYPTO_SHA1_HMAC)
				len = 12;	/* truncated HMAC */
			else
				continue;

			if (crp->crp_flags & CRYPTO_F_IMBUF)
				m_copyback((struct mbuf *)crp->crp_buf,
				    crd->crd_inject, len, macbuf);
			else if ((crp->crp_flags & CRYPTO_F_IOV) && crp->crp_mac)
				bcopy((caddr_t)macbuf, crp->crp_mac, len);
			break;
		}
	}

	/* Release DMA resources and complete the request. */
	if (cmd->src_map != cmd->dst_map) {
		bus_dmamap_unload(sc->sc_dmat, cmd->dst_map);
		bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
	}
	bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
	bus_dmamap_destroy(sc->sc_dmat, cmd->src_map);
	free(cmd, M_DEVBUF);
	crypto_done(crp);
}
2738
2739 /*
2740 * 7811 PB3 rev/2 parts lock-up on burst writes to Group 0
2741 * and Group 1 registers; avoid conditions that could create
2742 * burst writes by doing a read in between the writes.
2743 *
2744 * NB: The read we interpose is always to the same register;
2745 * we do this because reading from an arbitrary (e.g. last)
2746 * register may not always work.
2747 */
2748 static void
2749 hifn_write_reg_0(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2750 {
2751 if (sc->sc_flags & HIFN_IS_7811) {
2752 if (sc->sc_bar0_lastreg == reg - 4)
2753 bus_space_read_4(sc->sc_st0, sc->sc_sh0, HIFN_0_PUCNFG);
2754 sc->sc_bar0_lastreg = reg;
2755 }
2756 bus_space_write_4(sc->sc_st0, sc->sc_sh0, reg, val);
2757 }
2758
2759 static void
2760 hifn_write_reg_1(struct hifn_softc *sc, bus_size_t reg, u_int32_t val)
2761 {
2762 if (sc->sc_flags & HIFN_IS_7811) {
2763 if (sc->sc_bar1_lastreg == reg - 4)
2764 bus_space_read_4(sc->sc_st1, sc->sc_sh1, HIFN_1_REVID);
2765 sc->sc_bar1_lastreg = reg;
2766 }
2767 bus_space_write_4(sc->sc_st1, sc->sc_sh1, reg, val);
2768 }
Cache object: 98c4e57cf51b14f70ce109e87260cbd5
|