sys/dev/ubsec/ubsec.c
1 /* $OpenBSD: ubsec.c,v 1.115 2002/09/24 18:33:26 jason Exp $ */
2
3 /*-
4 * Copyright (c) 2000 Jason L. Wright (jason@thought.net)
5 * Copyright (c) 2000 Theo de Raadt (deraadt@openbsd.org)
6 * Copyright (c) 2001 Patrik Lindergren (patrik@ipunplugged.com)
7 *
8 * All rights reserved.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by Jason L. Wright
21 * 4. The name of the author may not be used to endorse or promote products
22 * derived from this software without specific prior written permission.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
25 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
26 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
27 * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
28 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
29 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
30 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
32 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
33 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 *
36 * Effort sponsored in part by the Defense Advanced Research Projects
37 * Agency (DARPA) and Air Force Research Laboratory, Air Force
38 * Materiel Command, USAF, under agreement number F30602-01-2-0537.
39 */
40
41 #include <sys/cdefs.h>
42 __FBSDID("$FreeBSD$");
43
44 /*
45 * uBsec 5[56]01, 58xx hardware crypto accelerator
46 */
47
48 #include "opt_ubsec.h"
49
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/proc.h>
53 #include <sys/errno.h>
54 #include <sys/malloc.h>
55 #include <sys/kernel.h>
56 #include <sys/module.h>
57 #include <sys/mbuf.h>
58 #include <sys/lock.h>
59 #include <sys/mutex.h>
60 #include <sys/sysctl.h>
61 #include <sys/endian.h>
62
63 #include <vm/vm.h>
64 #include <vm/pmap.h>
65
66 #include <machine/bus.h>
67 #include <machine/resource.h>
68 #include <sys/bus.h>
69 #include <sys/rman.h>
70
71 #include <crypto/sha1.h>
72 #include <opencrypto/cryptodev.h>
73 #include <opencrypto/cryptosoft.h>
74 #include <sys/md5.h>
75 #include <sys/random.h>
76 #include <sys/kobj.h>
77
78 #include "cryptodev_if.h"
79
80 #include <dev/pci/pcivar.h>
81 #include <dev/pci/pcireg.h>
82
83 /* grr, #defines for gratuitous incompatibility in queue.h */
84 #define SIMPLEQ_HEAD STAILQ_HEAD
85 #define SIMPLEQ_ENTRY STAILQ_ENTRY
86 #define SIMPLEQ_INIT STAILQ_INIT
87 #define SIMPLEQ_INSERT_TAIL STAILQ_INSERT_TAIL
88 #define SIMPLEQ_EMPTY STAILQ_EMPTY
89 #define SIMPLEQ_FIRST STAILQ_FIRST
90 #define SIMPLEQ_REMOVE_HEAD STAILQ_REMOVE_HEAD
91 #define SIMPLEQ_FOREACH STAILQ_FOREACH
92 /* ditto for endian.h */
93 #define letoh16(x) le16toh(x)
94 #define letoh32(x) le32toh(x)
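/*
 * (These shims keep the file close to the OpenBSD original, which
 * uses the SIMPLEQ_* and letoh*() spellings.)
 */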
95
96 #ifdef UBSEC_RNDTEST
97 #include <dev/rndtest/rndtest.h>
98 #endif
99 #include <dev/ubsec/ubsecreg.h>
100 #include <dev/ubsec/ubsecvar.h>
101
102 /*
103  * Prototypes for the device interface and crypto methods
104 */
105 static int ubsec_probe(device_t);
106 static int ubsec_attach(device_t);
107 static int ubsec_detach(device_t);
108 static int ubsec_suspend(device_t);
109 static int ubsec_resume(device_t);
110 static int ubsec_shutdown(device_t);
111
112 static int ubsec_newsession(device_t, u_int32_t *, struct cryptoini *);
113 static int ubsec_freesession(device_t, u_int64_t);
114 static int ubsec_process(device_t, struct cryptop *, int);
115 static int ubsec_kprocess(device_t, struct cryptkop *, int);
116
117 static device_method_t ubsec_methods[] = {
118 /* Device interface */
119 DEVMETHOD(device_probe, ubsec_probe),
120 DEVMETHOD(device_attach, ubsec_attach),
121 DEVMETHOD(device_detach, ubsec_detach),
122 DEVMETHOD(device_suspend, ubsec_suspend),
123 DEVMETHOD(device_resume, ubsec_resume),
124 DEVMETHOD(device_shutdown, ubsec_shutdown),
125
126 /* crypto device methods */
127 DEVMETHOD(cryptodev_newsession, ubsec_newsession),
128 DEVMETHOD(cryptodev_freesession,ubsec_freesession),
129 DEVMETHOD(cryptodev_process, ubsec_process),
130 DEVMETHOD(cryptodev_kprocess, ubsec_kprocess),
131
132 DEVMETHOD_END
133 };
134 static driver_t ubsec_driver = {
135 "ubsec",
136 ubsec_methods,
137 sizeof (struct ubsec_softc)
138 };
139 static devclass_t ubsec_devclass;
140
141 DRIVER_MODULE(ubsec, pci, ubsec_driver, ubsec_devclass, 0, 0);
142 MODULE_DEPEND(ubsec, crypto, 1, 1, 1);
143 #ifdef UBSEC_RNDTEST
144 MODULE_DEPEND(ubsec, rndtest, 1, 1, 1);
145 #endif
146
147 static void ubsec_intr(void *);
148 static void ubsec_callback(struct ubsec_softc *, struct ubsec_q *);
149 static void ubsec_feed(struct ubsec_softc *);
150 static void ubsec_mcopy(struct mbuf *, struct mbuf *, int, int);
151 static void ubsec_callback2(struct ubsec_softc *, struct ubsec_q2 *);
152 static int ubsec_feed2(struct ubsec_softc *);
153 static void ubsec_rng(void *);
154 static int ubsec_dma_malloc(struct ubsec_softc *, bus_size_t,
155 struct ubsec_dma_alloc *, int);
156 #define ubsec_dma_sync(_dma, _flags) \
157 bus_dmamap_sync((_dma)->dma_tag, (_dma)->dma_map, (_flags))
158 static void ubsec_dma_free(struct ubsec_softc *, struct ubsec_dma_alloc *);
159 static int ubsec_dmamap_aligned(struct ubsec_operand *op);
160
161 static void ubsec_reset_board(struct ubsec_softc *sc);
162 static void ubsec_init_board(struct ubsec_softc *sc);
163 static void ubsec_init_pciregs(device_t dev);
164 static void ubsec_totalreset(struct ubsec_softc *sc);
165
166 static int ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q);
167
168 static int ubsec_kprocess_modexp_hw(struct ubsec_softc *, struct cryptkop *, int);
169 static int ubsec_kprocess_modexp_sw(struct ubsec_softc *, struct cryptkop *, int);
170 static int ubsec_kprocess_rsapriv(struct ubsec_softc *, struct cryptkop *, int);
171 static void ubsec_kfree(struct ubsec_softc *, struct ubsec_q2 *);
172 static int ubsec_ksigbits(struct crparam *);
173 static void ubsec_kshift_r(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
174 static void ubsec_kshift_l(u_int, u_int8_t *, u_int, u_int8_t *, u_int);
175
176 static SYSCTL_NODE(_hw, OID_AUTO, ubsec, CTLFLAG_RD, 0,
177 "Broadcom driver parameters");
178
179 #ifdef UBSEC_DEBUG
180 static void ubsec_dump_pb(volatile struct ubsec_pktbuf *);
181 static void ubsec_dump_mcr(struct ubsec_mcr *);
182 static void ubsec_dump_ctx2(struct ubsec_ctx_keyop *);
183
184 static int ubsec_debug = 0;
185 SYSCTL_INT(_hw_ubsec, OID_AUTO, debug, CTLFLAG_RW, &ubsec_debug,
186 0, "control debugging msgs");
187 #endif
188
189 #define READ_REG(sc,r) \
190 bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (r))
191
192 #define WRITE_REG(sc,reg,val) \
193 bus_space_write_4((sc)->sc_st, (sc)->sc_sh, reg, val)
194
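/*
 * NB: SWAP32 rewrites a word from network (big-endian) order into the
 * chip's little-endian order in place; HTOLE32 does the same for a
 * host-order word.  Both evaluate their argument twice.
 */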
195 #define SWAP32(x) (x) = htole32(ntohl((x)))
196 #define HTOLE32(x) (x) = htole32(x)
197
198 struct ubsec_stats ubsecstats;
199 SYSCTL_STRUCT(_hw_ubsec, OID_AUTO, stats, CTLFLAG_RD, &ubsecstats,
200 ubsec_stats, "driver statistics");
201
202 static int
203 ubsec_probe(device_t dev)
204 {
205 if (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
206 (pci_get_device(dev) == PCI_PRODUCT_SUN_5821 ||
207 pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K))
208 return (BUS_PROBE_DEFAULT);
209 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
210 (pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5501 ||
211 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601))
212 return (BUS_PROBE_DEFAULT);
213 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
214 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5801 ||
215 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
216 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805 ||
217 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820 ||
218 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
219 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
220 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 ||
221 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825
222 ))
223 return (BUS_PROBE_DEFAULT);
224 return (ENXIO);
225 }
226
227 static const char*
228 ubsec_partname(struct ubsec_softc *sc)
229 {
230 /* XXX sprintf numbers when not decoded */
231 switch (pci_get_vendor(sc->sc_dev)) {
232 case PCI_VENDOR_BROADCOM:
233 switch (pci_get_device(sc->sc_dev)) {
234 case PCI_PRODUCT_BROADCOM_5801: return "Broadcom 5801";
235 case PCI_PRODUCT_BROADCOM_5802: return "Broadcom 5802";
236 case PCI_PRODUCT_BROADCOM_5805: return "Broadcom 5805";
237 case PCI_PRODUCT_BROADCOM_5820: return "Broadcom 5820";
238 case PCI_PRODUCT_BROADCOM_5821: return "Broadcom 5821";
239 case PCI_PRODUCT_BROADCOM_5822: return "Broadcom 5822";
240 case PCI_PRODUCT_BROADCOM_5823: return "Broadcom 5823";
241 case PCI_PRODUCT_BROADCOM_5825: return "Broadcom 5825";
242 }
243 return "Broadcom unknown-part";
244 case PCI_VENDOR_BLUESTEEL:
245 switch (pci_get_device(sc->sc_dev)) {
246 case PCI_PRODUCT_BLUESTEEL_5601: return "Bluesteel 5601";
247 }
248 return "Bluesteel unknown-part";
249 case PCI_VENDOR_SUN:
250 switch (pci_get_device(sc->sc_dev)) {
251 case PCI_PRODUCT_SUN_5821: return "Sun Crypto 5821";
252 case PCI_PRODUCT_SUN_SCA1K: return "Sun Crypto 1K";
253 }
254 return "Sun unknown-part";
255 }
256 return "Unknown-vendor unknown-part";
257 }
258
259 static void
260 default_harvest(struct rndtest_state *rsp, void *buf, u_int count)
261 {
262 /* MarkM: FIX!! Check that this does not swamp the harvester! */
263 random_harvest_queue(buf, count, count*NBBY/2, RANDOM_PURE_UBSEC);
264 }
265
266 static int
267 ubsec_attach(device_t dev)
268 {
269 struct ubsec_softc *sc = device_get_softc(dev);
270 struct ubsec_dma *dmap;
271 u_int32_t i;
272 int rid;
273
274 bzero(sc, sizeof (*sc));
275 sc->sc_dev = dev;
276
277 SIMPLEQ_INIT(&sc->sc_queue);
278 SIMPLEQ_INIT(&sc->sc_qchip);
279 SIMPLEQ_INIT(&sc->sc_queue2);
280 SIMPLEQ_INIT(&sc->sc_qchip2);
281 SIMPLEQ_INIT(&sc->sc_q2free);
282
283 /* XXX handle power management */
284
285 sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
286
287 if (pci_get_vendor(dev) == PCI_VENDOR_BLUESTEEL &&
288 pci_get_device(dev) == PCI_PRODUCT_BLUESTEEL_5601)
289 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;
290
291 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
292 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5802 ||
293 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5805))
294 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;
295
296 if (pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
297 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5820)
298 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
299 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
300
301 if ((pci_get_vendor(dev) == PCI_VENDOR_BROADCOM &&
302 (pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5821 ||
303 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5822 ||
304 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5823 ||
305 pci_get_device(dev) == PCI_PRODUCT_BROADCOM_5825)) ||
306 (pci_get_vendor(dev) == PCI_VENDOR_SUN &&
307 (pci_get_device(dev) == PCI_PRODUCT_SUN_SCA1K ||
308 pci_get_device(dev) == PCI_PRODUCT_SUN_5821))) {
309 /* NB: the 5821/5822 defines some additional status bits */
310 sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
311 BS_STAT_MCR2_ALLEMPTY;
312 sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
313 UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
314 }
315
316 pci_enable_busmaster(dev);
317
318 /*
319 * Setup memory-mapping of PCI registers.
320 */
321 rid = BS_BAR;
322 sc->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
323 RF_ACTIVE);
324 if (sc->sc_sr == NULL) {
325 device_printf(dev, "cannot map register space\n");
326 goto bad;
327 }
328 sc->sc_st = rman_get_bustag(sc->sc_sr);
329 sc->sc_sh = rman_get_bushandle(sc->sc_sr);
330
331 /*
332 * Arrange interrupt line.
333 */
334 rid = 0;
335 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
336 RF_SHAREABLE|RF_ACTIVE);
337 if (sc->sc_irq == NULL) {
338 device_printf(dev, "could not map interrupt\n");
339 goto bad1;
340 }
341 /*
342 * NB: Network code assumes we are blocked with splimp()
343 * so make sure the IRQ is mapped appropriately.
344 */
345 if (bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
346 NULL, ubsec_intr, sc, &sc->sc_ih)) {
347 device_printf(dev, "could not establish interrupt\n");
348 goto bad2;
349 }
350
351 sc->sc_cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
352 if (sc->sc_cid < 0) {
353 device_printf(dev, "could not get crypto driver id\n");
354 goto bad3;
355 }
356
357 /*
358 * Setup DMA descriptor area.
359 */
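	/*
	 * NB: the maxsegsize below apparently tracks the chip's
	 * packet-buffer length field; segments are clipped again to
	 * 0xfffc when the scatter lists are built in ubsec_process().
	 */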
360 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
361 1, 0, /* alignment, bounds */
362 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
363 BUS_SPACE_MAXADDR, /* highaddr */
364 NULL, NULL, /* filter, filterarg */
365 0x3ffff, /* maxsize */
366 UBS_MAX_SCATTER, /* nsegments */
367 0xffff, /* maxsegsize */
368 BUS_DMA_ALLOCNOW, /* flags */
369 NULL, NULL, /* lockfunc, lockarg */
370 &sc->sc_dmat)) {
371 device_printf(dev, "cannot allocate DMA tag\n");
372 goto bad4;
373 }
374 SIMPLEQ_INIT(&sc->sc_freequeue);
375 dmap = sc->sc_dmaa;
376 for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
377 struct ubsec_q *q;
378
379 q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
380 M_DEVBUF, M_NOWAIT);
381 if (q == NULL) {
382 device_printf(dev, "cannot allocate queue buffers\n");
383 break;
384 }
385
386 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
387 &dmap->d_alloc, 0)) {
388 device_printf(dev, "cannot allocate dma buffers\n");
389 free(q, M_DEVBUF);
390 break;
391 }
392 dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;
393
394 q->q_dma = dmap;
395 sc->sc_queuea[i] = q;
396
397 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
398 }
399 mtx_init(&sc->sc_mcr1lock, device_get_nameunit(dev),
400 "mcr1 operations", MTX_DEF);
401 mtx_init(&sc->sc_freeqlock, device_get_nameunit(dev),
402 "mcr1 free q", MTX_DEF);
403
404 device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc));
405
406 crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
407 crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
408 crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
409 crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
410
411 /*
412 * Reset Broadcom chip
413 */
414 ubsec_reset_board(sc);
415
416 /*
417 * Init Broadcom specific PCI settings
418 */
419 ubsec_init_pciregs(dev);
420
421 /*
422 * Init Broadcom chip
423 */
424 ubsec_init_board(sc);
425
426 #ifndef UBSEC_NO_RNG
427 if (sc->sc_flags & UBS_FLAGS_RNG) {
428 sc->sc_statmask |= BS_STAT_MCR2_DONE;
429 #ifdef UBSEC_RNDTEST
430 sc->sc_rndtest = rndtest_attach(dev);
431 if (sc->sc_rndtest)
432 sc->sc_harvest = rndtest_harvest;
433 else
434 sc->sc_harvest = default_harvest;
435 #else
436 sc->sc_harvest = default_harvest;
437 #endif
438
439 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
440 &sc->sc_rng.rng_q.q_mcr, 0))
441 goto skip_rng;
442
443 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
444 &sc->sc_rng.rng_q.q_ctx, 0)) {
445 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
446 goto skip_rng;
447 }
448
449 if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
450 UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
451 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
452 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
453 goto skip_rng;
454 }
455
456 if (hz >= 100)
457 sc->sc_rnghz = hz / 100;
458 else
459 sc->sc_rnghz = 1;
460 callout_init(&sc->sc_rngto, 1);
461 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
462 skip_rng:
463 ;
464 }
465 #endif /* UBSEC_NO_RNG */
466 mtx_init(&sc->sc_mcr2lock, device_get_nameunit(dev),
467 "mcr2 operations", MTX_DEF);
468
469 if (sc->sc_flags & UBS_FLAGS_KEY) {
470 sc->sc_statmask |= BS_STAT_MCR2_DONE;
471
472 crypto_kregister(sc->sc_cid, CRK_MOD_EXP, 0);
473 #if 0
474 crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
475 #endif
476 }
477 gone_in_dev(dev, 13, "Does not support modern crypto algorithms");
478 return (0);
479 bad4:
480 crypto_unregister_all(sc->sc_cid);
481 bad3:
482 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
483 bad2:
484 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
485 bad1:
486 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
487 bad:
488 return (ENXIO);
489 }
490
491 /*
492 * Detach a device that successfully probed.
493 */
494 static int
495 ubsec_detach(device_t dev)
496 {
497 struct ubsec_softc *sc = device_get_softc(dev);
498
499 /* XXX wait/abort active ops */
500
501 /* disable interrupts */
502 WRITE_REG(sc, BS_CTRL, READ_REG(sc, BS_CTRL) &~
503 (BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR));
504
505 callout_stop(&sc->sc_rngto);
506
507 crypto_unregister_all(sc->sc_cid);
508
509 #ifdef UBSEC_RNDTEST
510 if (sc->sc_rndtest)
511 rndtest_detach(sc->sc_rndtest);
512 #endif
513
514 while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
515 struct ubsec_q *q;
516
517 q = SIMPLEQ_FIRST(&sc->sc_freequeue);
518 SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
519 ubsec_dma_free(sc, &q->q_dma->d_alloc);
520 free(q, M_DEVBUF);
521 }
522 mtx_destroy(&sc->sc_mcr1lock);
523 mtx_destroy(&sc->sc_freeqlock);
524 #ifndef UBSEC_NO_RNG
525 if (sc->sc_flags & UBS_FLAGS_RNG) {
526 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
527 ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
528 ubsec_dma_free(sc, &sc->sc_rng.rng_buf);
529 }
530 #endif /* UBSEC_NO_RNG */
531 mtx_destroy(&sc->sc_mcr2lock);
532
533 bus_generic_detach(dev);
534 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
535 bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
536
537 bus_dma_tag_destroy(sc->sc_dmat);
538 bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, sc->sc_sr);
539
540 return (0);
541 }
542
543 /*
544 * Stop all chip i/o so that the kernel's probe routines don't
545 * get confused by errant DMAs when rebooting.
546 */
547 static int
548 ubsec_shutdown(device_t dev)
549 {
550 #ifdef notyet
551 ubsec_stop(device_get_softc(dev));
552 #endif
553 return (0);
554 }
555
556 /*
557 * Device suspend routine.
558 */
559 static int
560 ubsec_suspend(device_t dev)
561 {
562 struct ubsec_softc *sc = device_get_softc(dev);
563
564 #ifdef notyet
565 /* XXX stop the device and save PCI settings */
566 #endif
567 sc->sc_suspended = 1;
568
569 return (0);
570 }
571
572 static int
573 ubsec_resume(device_t dev)
574 {
575 struct ubsec_softc *sc = device_get_softc(dev);
576
577 #ifdef notyet
578 	/* XXX restore PCI settings and start the device */
579 #endif
580 sc->sc_suspended = 0;
581 return (0);
582 }
583
584 /*
585 * UBSEC Interrupt routine
586 */
587 static void
588 ubsec_intr(void *arg)
589 {
590 struct ubsec_softc *sc = arg;
591 volatile u_int32_t stat;
592 struct ubsec_q *q;
593 struct ubsec_dma *dmap;
594 int npkts = 0, i;
595
596 stat = READ_REG(sc, BS_STAT);
597 stat &= sc->sc_statmask;
598 if (stat == 0)
599 return;
600
601 WRITE_REG(sc, BS_STAT, stat); /* IACK */
602
603 /*
604 * Check to see if we have any packets waiting for us
605 */
606 if ((stat & BS_STAT_MCR1_DONE)) {
607 mtx_lock(&sc->sc_mcr1lock);
608 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
609 q = SIMPLEQ_FIRST(&sc->sc_qchip);
610 dmap = q->q_dma;
611
612 if ((dmap->d_dma->d_mcr.mcr_flags & htole16(UBS_MCR_DONE)) == 0)
613 break;
614
615 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
616
617 npkts = q->q_nstacked_mcrs;
618 sc->sc_nqchip -= 1+npkts;
619 /*
620 			 * Search for further sc_qchip ubsec_q's that share
621 			 * the same MCR and complete them too; they must be
622 			 * at the head of the queue.
623 */
624 for (i = 0; i < npkts; i++) {
625 if(q->q_stacked_mcr[i]) {
626 ubsec_callback(sc, q->q_stacked_mcr[i]);
627 } else {
628 break;
629 }
630 }
631 ubsec_callback(sc, q);
632 }
633 /*
634 		 * Don't send any more packets to the chip if there has been
635 * a DMAERR.
636 */
637 if (!(stat & BS_STAT_DMAERR))
638 ubsec_feed(sc);
639 mtx_unlock(&sc->sc_mcr1lock);
640 }
641
642 /*
643 * Check to see if we have any key setups/rng's waiting for us
644 */
645 if ((sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG)) &&
646 (stat & BS_STAT_MCR2_DONE)) {
647 struct ubsec_q2 *q2;
648 struct ubsec_mcr *mcr;
649
650 mtx_lock(&sc->sc_mcr2lock);
651 while (!SIMPLEQ_EMPTY(&sc->sc_qchip2)) {
652 q2 = SIMPLEQ_FIRST(&sc->sc_qchip2);
653
654 ubsec_dma_sync(&q2->q_mcr,
655 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
656
657 mcr = (struct ubsec_mcr *)q2->q_mcr.dma_vaddr;
658 if ((mcr->mcr_flags & htole16(UBS_MCR_DONE)) == 0) {
659 ubsec_dma_sync(&q2->q_mcr,
660 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
661 break;
662 }
663 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip2, q_next);
664 ubsec_callback2(sc, q2);
665 /*
666 			 * Don't send any more packets to the chip if there has been
667 * a DMAERR.
668 */
669 if (!(stat & BS_STAT_DMAERR))
670 ubsec_feed2(sc);
671 }
672 mtx_unlock(&sc->sc_mcr2lock);
673 }
674
675 /*
676 	 * Check to see if we got any DMA errors
677 */
678 if (stat & BS_STAT_DMAERR) {
679 #ifdef UBSEC_DEBUG
680 if (ubsec_debug) {
681 volatile u_int32_t a = READ_REG(sc, BS_ERR);
682
683 printf("dmaerr %s@%08x\n",
684 (a & BS_ERR_READ) ? "read" : "write",
685 a & BS_ERR_ADDR);
686 }
687 #endif /* UBSEC_DEBUG */
688 ubsecstats.hst_dmaerr++;
689 mtx_lock(&sc->sc_mcr1lock);
690 ubsec_totalreset(sc);
691 ubsec_feed(sc);
692 mtx_unlock(&sc->sc_mcr1lock);
693 }
694
695 if (sc->sc_needwakeup) { /* XXX check high watermark */
696 int wakeup;
697
698 mtx_lock(&sc->sc_freeqlock);
699 wakeup = sc->sc_needwakeup & (CRYPTO_SYMQ|CRYPTO_ASYMQ);
700 #ifdef UBSEC_DEBUG
701 if (ubsec_debug)
702 device_printf(sc->sc_dev, "wakeup crypto (%x)\n",
703 sc->sc_needwakeup);
704 #endif /* UBSEC_DEBUG */
705 sc->sc_needwakeup &= ~wakeup;
706 mtx_unlock(&sc->sc_freeqlock);
707 crypto_unblock(sc->sc_cid, wakeup);
708 }
709 }
710
711 /*
712 * ubsec_feed() - aggregate and post requests to chip
713 */
714 static void
715 ubsec_feed(struct ubsec_softc *sc)
716 {
717 struct ubsec_q *q, *q2;
718 int npkts, i;
719 void *v;
720 u_int32_t stat;
721
722 /*
723 * Decide how many ops to combine in a single MCR. We cannot
724 * aggregate more than UBS_MAX_AGGR because this is the number
725 * of slots defined in the data structure. Note that
726 * aggregation only happens if ops are marked batch'able.
727 * Aggregating ops reduces the number of interrupts to the host
728 * but also (potentially) increases the latency for processing
729 * completed ops as we only get an interrupt when all aggregated
730 * ops have completed.
731 */
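	/*
	 * Example (illustrative only): with four batchable ops queued,
	 * npkts ends up 4 and a single MCR is posted with mcr_pkts == 4;
	 * the three trailing requests ride along as ubsec_mcr_add
	 * records and are remembered in q_stacked_mcr[] so that
	 * ubsec_intr() can complete all of them off one interrupt.
	 */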
732 if (sc->sc_nqueue == 0)
733 return;
734 if (sc->sc_nqueue > 1) {
735 npkts = 0;
736 SIMPLEQ_FOREACH(q, &sc->sc_queue, q_next) {
737 npkts++;
738 if ((q->q_crp->crp_flags & CRYPTO_F_BATCH) == 0)
739 break;
740 }
741 } else
742 npkts = 1;
743 /*
744 * Check device status before going any further.
745 */
746 if ((stat = READ_REG(sc, BS_STAT)) & (BS_STAT_MCR1_FULL | BS_STAT_DMAERR)) {
747 if (stat & BS_STAT_DMAERR) {
748 ubsec_totalreset(sc);
749 ubsecstats.hst_dmaerr++;
750 } else
751 ubsecstats.hst_mcr1full++;
752 return;
753 }
754 if (sc->sc_nqueue > ubsecstats.hst_maxqueue)
755 ubsecstats.hst_maxqueue = sc->sc_nqueue;
756 if (npkts > UBS_MAX_AGGR)
757 npkts = UBS_MAX_AGGR;
758 if (npkts < 2) /* special case 1 op */
759 goto feed1;
760
761 ubsecstats.hst_totbatch += npkts-1;
762 #ifdef UBSEC_DEBUG
763 if (ubsec_debug)
764 printf("merging %d records\n", npkts);
765 #endif /* UBSEC_DEBUG */
766
767 q = SIMPLEQ_FIRST(&sc->sc_queue);
768 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
769 --sc->sc_nqueue;
770
771 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
772 if (q->q_dst_map != NULL)
773 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);
774
775 q->q_nstacked_mcrs = npkts - 1; /* Number of packets stacked */
776
777 for (i = 0; i < q->q_nstacked_mcrs; i++) {
778 q2 = SIMPLEQ_FIRST(&sc->sc_queue);
779 bus_dmamap_sync(sc->sc_dmat, q2->q_src_map,
780 BUS_DMASYNC_PREWRITE);
781 if (q2->q_dst_map != NULL)
782 bus_dmamap_sync(sc->sc_dmat, q2->q_dst_map,
783 BUS_DMASYNC_PREREAD);
784 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
785 --sc->sc_nqueue;
786
787 v = (void*)(((char *)&q2->q_dma->d_dma->d_mcr) + sizeof(struct ubsec_mcr) -
788 sizeof(struct ubsec_mcr_add));
789 bcopy(v, &q->q_dma->d_dma->d_mcradd[i], sizeof(struct ubsec_mcr_add));
790 q->q_stacked_mcr[i] = q2;
791 }
792 q->q_dma->d_dma->d_mcr.mcr_pkts = htole16(npkts);
793 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
794 sc->sc_nqchip += npkts;
795 if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
796 ubsecstats.hst_maxqchip = sc->sc_nqchip;
797 ubsec_dma_sync(&q->q_dma->d_alloc,
798 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
799 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
800 offsetof(struct ubsec_dmachunk, d_mcr));
801 return;
802 feed1:
803 q = SIMPLEQ_FIRST(&sc->sc_queue);
804
805 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_PREWRITE);
806 if (q->q_dst_map != NULL)
807 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map, BUS_DMASYNC_PREREAD);
808 ubsec_dma_sync(&q->q_dma->d_alloc,
809 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
810
811 WRITE_REG(sc, BS_MCR1, q->q_dma->d_alloc.dma_paddr +
812 offsetof(struct ubsec_dmachunk, d_mcr));
813 #ifdef UBSEC_DEBUG
814 if (ubsec_debug)
815 printf("feed1: q->chip %p %08x stat %08x\n",
816 q, (u_int32_t)vtophys(&q->q_dma->d_dma->d_mcr),
817 stat);
818 #endif /* UBSEC_DEBUG */
819 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue, q_next);
820 --sc->sc_nqueue;
821 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip, q, q_next);
822 sc->sc_nqchip++;
823 if (sc->sc_nqchip > ubsecstats.hst_maxqchip)
824 ubsecstats.hst_maxqchip = sc->sc_nqchip;
825 return;
826 }
827
828 static void
829 ubsec_setup_enckey(struct ubsec_session *ses, int algo, caddr_t key)
830 {
831
832 /* Go ahead and compute key in ubsec's byte order */
833 if (algo == CRYPTO_DES_CBC) {
834 bcopy(key, &ses->ses_deskey[0], 8);
835 bcopy(key, &ses->ses_deskey[2], 8);
836 bcopy(key, &ses->ses_deskey[4], 8);
837 } else
838 bcopy(key, ses->ses_deskey, 24);
839
840 SWAP32(ses->ses_deskey[0]);
841 SWAP32(ses->ses_deskey[1]);
842 SWAP32(ses->ses_deskey[2]);
843 SWAP32(ses->ses_deskey[3]);
844 SWAP32(ses->ses_deskey[4]);
845 SWAP32(ses->ses_deskey[5]);
846 }
847
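/*
 * Precompute the HMAC inner and outer digest midstates (the standard
 * ipad/opad trick): hash one block of key^ipad and one of key^opad
 * and keep only the resulting context states.  The chip resumes the
 * hash from these states, so the raw key is never handed to the
 * hardware.  The key is XOR-scrambled in place and restored on exit.
 */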
848 static void
849 ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
850 {
851 MD5_CTX md5ctx;
852 SHA1_CTX sha1ctx;
853 int i;
854
855 for (i = 0; i < klen; i++)
856 key[i] ^= HMAC_IPAD_VAL;
857
858 if (algo == CRYPTO_MD5_HMAC) {
859 MD5Init(&md5ctx);
860 MD5Update(&md5ctx, key, klen);
861 MD5Update(&md5ctx, hmac_ipad_buffer, MD5_HMAC_BLOCK_LEN - klen);
862 bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));
863 } else {
864 SHA1Init(&sha1ctx);
865 SHA1Update(&sha1ctx, key, klen);
866 SHA1Update(&sha1ctx, hmac_ipad_buffer,
867 SHA1_HMAC_BLOCK_LEN - klen);
868 bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
869 }
870
871 for (i = 0; i < klen; i++)
872 key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
873
874 if (algo == CRYPTO_MD5_HMAC) {
875 MD5Init(&md5ctx);
876 MD5Update(&md5ctx, key, klen);
877 MD5Update(&md5ctx, hmac_opad_buffer, MD5_HMAC_BLOCK_LEN - klen);
878 bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));
879 } else {
880 SHA1Init(&sha1ctx);
881 SHA1Update(&sha1ctx, key, klen);
882 SHA1Update(&sha1ctx, hmac_opad_buffer,
883 SHA1_HMAC_BLOCK_LEN - klen);
884 bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
885 }
886
887 for (i = 0; i < klen; i++)
888 key[i] ^= HMAC_OPAD_VAL;
889 }
890
891 /*
892 * Allocate a new 'session' and return an encoded session id. 'sidp'
893 * contains our registration id, and should contain an encoded session
894 * id on successful allocation.
895 */
896 static int
897 ubsec_newsession(device_t dev, u_int32_t *sidp, struct cryptoini *cri)
898 {
899 struct ubsec_softc *sc = device_get_softc(dev);
900 struct cryptoini *c, *encini = NULL, *macini = NULL;
901 struct ubsec_session *ses = NULL;
902 int sesn;
903
904 if (sidp == NULL || cri == NULL || sc == NULL)
905 return (EINVAL);
906
907 for (c = cri; c != NULL; c = c->cri_next) {
908 if (c->cri_alg == CRYPTO_MD5_HMAC ||
909 c->cri_alg == CRYPTO_SHA1_HMAC) {
910 if (macini)
911 return (EINVAL);
912 macini = c;
913 } else if (c->cri_alg == CRYPTO_DES_CBC ||
914 c->cri_alg == CRYPTO_3DES_CBC) {
915 if (encini)
916 return (EINVAL);
917 encini = c;
918 } else
919 return (EINVAL);
920 }
921 if (encini == NULL && macini == NULL)
922 return (EINVAL);
923
924 if (sc->sc_sessions == NULL) {
925 ses = sc->sc_sessions = (struct ubsec_session *)malloc(
926 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
927 if (ses == NULL)
928 return (ENOMEM);
929 sesn = 0;
930 sc->sc_nsessions = 1;
931 } else {
932 for (sesn = 0; sesn < sc->sc_nsessions; sesn++) {
933 if (sc->sc_sessions[sesn].ses_used == 0) {
934 ses = &sc->sc_sessions[sesn];
935 break;
936 }
937 }
938
939 if (ses == NULL) {
940 sesn = sc->sc_nsessions;
941 ses = (struct ubsec_session *)malloc((sesn + 1) *
942 sizeof(struct ubsec_session), M_DEVBUF, M_NOWAIT);
943 if (ses == NULL)
944 return (ENOMEM);
945 bcopy(sc->sc_sessions, ses, sesn *
946 sizeof(struct ubsec_session));
947 bzero(sc->sc_sessions, sesn *
948 sizeof(struct ubsec_session));
949 free(sc->sc_sessions, M_DEVBUF);
950 sc->sc_sessions = ses;
951 ses = &sc->sc_sessions[sesn];
952 sc->sc_nsessions++;
953 }
954 }
955 bzero(ses, sizeof(struct ubsec_session));
956 ses->ses_used = 1;
957
958 if (encini) {
959 /* get an IV, network byte order */
960 /* XXX may read fewer than requested */
961 read_random(ses->ses_iv, sizeof(ses->ses_iv));
962
963 if (encini->cri_key != NULL) {
964 ubsec_setup_enckey(ses, encini->cri_alg,
965 encini->cri_key);
966 }
967 }
968
969 if (macini) {
970 ses->ses_mlen = macini->cri_mlen;
971 if (ses->ses_mlen == 0) {
972 if (macini->cri_alg == CRYPTO_MD5_HMAC)
973 ses->ses_mlen = MD5_HASH_LEN;
974 else
975 ses->ses_mlen = SHA1_HASH_LEN;
976 }
977
978 if (macini->cri_key != NULL) {
979 ubsec_setup_mackey(ses, macini->cri_alg,
980 macini->cri_key, macini->cri_klen / 8);
981 }
982 }
983
984 *sidp = UBSEC_SID(device_get_unit(sc->sc_dev), sesn);
985 return (0);
986 }
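
/*
 * Illustrative sketch only (hypothetical helper, kept out of the
 * build): a sid produced above round-trips through UBSEC_SESSION()
 * to index sc_sessions[], exactly as ubsec_process() does below.
 */
#if 0
static struct ubsec_session *
ubsec_sid2ses(struct ubsec_softc *sc, u_int32_t sid)
{
	return (&sc->sc_sessions[UBSEC_SESSION(sid)]);
}
#endif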
987
988 /*
989 * Deallocate a session.
990 */
991 static int
992 ubsec_freesession(device_t dev, u_int64_t tid)
993 {
994 struct ubsec_softc *sc = device_get_softc(dev);
995 int session, ret;
996 u_int32_t sid = CRYPTO_SESID2LID(tid);
997
998 if (sc == NULL)
999 return (EINVAL);
1000
1001 session = UBSEC_SESSION(sid);
1002 if (session < sc->sc_nsessions) {
1003 bzero(&sc->sc_sessions[session],
1004 sizeof(sc->sc_sessions[session]));
1005 ret = 0;
1006 } else
1007 ret = EINVAL;
1008
1009 return (ret);
1010 }
1011
1012 static void
1013 ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
1014 {
1015 struct ubsec_operand *op = arg;
1016
1017 KASSERT(nsegs <= UBS_MAX_SCATTER,
1018 ("Too many DMA segments returned when mapping operand"));
1019 #ifdef UBSEC_DEBUG
1020 if (ubsec_debug)
1021 printf("ubsec_op_cb: mapsize %u nsegs %d error %d\n",
1022 (u_int) mapsize, nsegs, error);
1023 #endif
1024 if (error != 0)
1025 return;
1026 op->mapsize = mapsize;
1027 op->nsegs = nsegs;
1028 bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
1029 }
1030
1031 static int
1032 ubsec_process(device_t dev, struct cryptop *crp, int hint)
1033 {
1034 struct ubsec_softc *sc = device_get_softc(dev);
1035 struct ubsec_q *q = NULL;
1036 int err = 0, i, j, nicealign;
1037 struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
1038 int encoffset = 0, macoffset = 0, cpskip, cpoffset;
1039 int sskip, dskip, stheend, dtheend;
1040 int16_t coffset;
1041 struct ubsec_session *ses;
1042 struct ubsec_pktctx ctx;
1043 struct ubsec_dma *dmap = NULL;
1044
1045 if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
1046 ubsecstats.hst_invalid++;
1047 return (EINVAL);
1048 }
1049 if (UBSEC_SESSION(crp->crp_sid) >= sc->sc_nsessions) {
1050 ubsecstats.hst_badsession++;
1051 return (EINVAL);
1052 }
1053
1054 mtx_lock(&sc->sc_freeqlock);
1055 if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
1056 ubsecstats.hst_queuefull++;
1057 sc->sc_needwakeup |= CRYPTO_SYMQ;
1058 mtx_unlock(&sc->sc_freeqlock);
1059 return (ERESTART);
1060 }
1061 q = SIMPLEQ_FIRST(&sc->sc_freequeue);
1062 SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
1063 mtx_unlock(&sc->sc_freeqlock);
1064
1065 dmap = q->q_dma; /* Save dma pointer */
1066 bzero(q, sizeof(struct ubsec_q));
1067 bzero(&ctx, sizeof(ctx));
1068
1069 q->q_sesn = UBSEC_SESSION(crp->crp_sid);
1070 q->q_dma = dmap;
1071 ses = &sc->sc_sessions[q->q_sesn];
1072
1073 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1074 q->q_src_m = (struct mbuf *)crp->crp_buf;
1075 q->q_dst_m = (struct mbuf *)crp->crp_buf;
1076 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1077 q->q_src_io = (struct uio *)crp->crp_buf;
1078 q->q_dst_io = (struct uio *)crp->crp_buf;
1079 } else {
1080 ubsecstats.hst_badflags++;
1081 err = EINVAL;
1082 goto errout; /* XXX we don't handle contiguous blocks! */
1083 }
1084
1085 bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));
1086
1087 dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
1088 dmap->d_dma->d_mcr.mcr_flags = 0;
1089 q->q_crp = crp;
1090
1091 crd1 = crp->crp_desc;
1092 if (crd1 == NULL) {
1093 ubsecstats.hst_nodesc++;
1094 err = EINVAL;
1095 goto errout;
1096 }
1097 crd2 = crd1->crd_next;
1098
1099 if (crd2 == NULL) {
1100 if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
1101 crd1->crd_alg == CRYPTO_SHA1_HMAC) {
1102 maccrd = crd1;
1103 enccrd = NULL;
1104 } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
1105 crd1->crd_alg == CRYPTO_3DES_CBC) {
1106 maccrd = NULL;
1107 enccrd = crd1;
1108 } else {
1109 ubsecstats.hst_badalg++;
1110 err = EINVAL;
1111 goto errout;
1112 }
1113 } else {
1114 if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
1115 crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
1116 (crd2->crd_alg == CRYPTO_DES_CBC ||
1117 crd2->crd_alg == CRYPTO_3DES_CBC) &&
1118 ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
1119 maccrd = crd1;
1120 enccrd = crd2;
1121 } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
1122 crd1->crd_alg == CRYPTO_3DES_CBC) &&
1123 (crd2->crd_alg == CRYPTO_MD5_HMAC ||
1124 crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
1125 (crd1->crd_flags & CRD_F_ENCRYPT)) {
1126 enccrd = crd1;
1127 maccrd = crd2;
1128 } else {
1129 /*
1130 			 * The ubsec cannot handle the requested operation order
1131 */
1132 ubsecstats.hst_badalg++;
1133 err = EINVAL;
1134 goto errout;
1135 }
1136 }
1137
1138 if (enccrd) {
1139 if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
1140 ubsec_setup_enckey(ses, enccrd->crd_alg,
1141 enccrd->crd_key);
1142 }
1143
1144 encoffset = enccrd->crd_skip;
1145 ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
1146
1147 if (enccrd->crd_flags & CRD_F_ENCRYPT) {
1148 q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
1149
1150 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1151 bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
1152 else {
1153 ctx.pc_iv[0] = ses->ses_iv[0];
1154 ctx.pc_iv[1] = ses->ses_iv[1];
1155 }
1156
1157 if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
1158 crypto_copyback(crp->crp_flags, crp->crp_buf,
1159 enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
1160 }
1161 } else {
1162 ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);
1163
1164 if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
1165 bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
1166 else {
1167 crypto_copydata(crp->crp_flags, crp->crp_buf,
1168 enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
1169 }
1170 }
1171
1172 ctx.pc_deskey[0] = ses->ses_deskey[0];
1173 ctx.pc_deskey[1] = ses->ses_deskey[1];
1174 ctx.pc_deskey[2] = ses->ses_deskey[2];
1175 ctx.pc_deskey[3] = ses->ses_deskey[3];
1176 ctx.pc_deskey[4] = ses->ses_deskey[4];
1177 ctx.pc_deskey[5] = ses->ses_deskey[5];
1178 SWAP32(ctx.pc_iv[0]);
1179 SWAP32(ctx.pc_iv[1]);
1180 }
1181
1182 if (maccrd) {
1183 if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
1184 ubsec_setup_mackey(ses, maccrd->crd_alg,
1185 maccrd->crd_key, maccrd->crd_klen / 8);
1186 }
1187
1188 macoffset = maccrd->crd_skip;
1189
1190 if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
1191 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
1192 else
1193 ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);
1194
1195 for (i = 0; i < 5; i++) {
1196 ctx.pc_hminner[i] = ses->ses_hminner[i];
1197 ctx.pc_hmouter[i] = ses->ses_hmouter[i];
1198
1199 HTOLE32(ctx.pc_hminner[i]);
1200 HTOLE32(ctx.pc_hmouter[i]);
1201 }
1202 }
1203
1204 if (enccrd && maccrd) {
1205 /*
1206 		 * ubsec cannot handle packets where the ends of
1207 		 * encryption and authentication differ, or where the
1208 * encrypted part begins before the authenticated part.
1209 */
1210 if ((encoffset + enccrd->crd_len) !=
1211 (macoffset + maccrd->crd_len)) {
1212 ubsecstats.hst_lenmismatch++;
1213 err = EINVAL;
1214 goto errout;
1215 }
1216 if (enccrd->crd_skip < maccrd->crd_skip) {
1217 ubsecstats.hst_skipmismatch++;
1218 err = EINVAL;
1219 goto errout;
1220 }
1221 sskip = maccrd->crd_skip;
1222 cpskip = dskip = enccrd->crd_skip;
1223 stheend = maccrd->crd_len;
1224 dtheend = enccrd->crd_len;
1225 coffset = enccrd->crd_skip - maccrd->crd_skip;
1226 cpoffset = cpskip + dtheend;
1227 #ifdef UBSEC_DEBUG
1228 if (ubsec_debug) {
1229 printf("mac: skip %d, len %d, inject %d\n",
1230 maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
1231 printf("enc: skip %d, len %d, inject %d\n",
1232 enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
1233 printf("src: skip %d, len %d\n", sskip, stheend);
1234 printf("dst: skip %d, len %d\n", dskip, dtheend);
1235 printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
1236 coffset, stheend, cpskip, cpoffset);
1237 }
1238 #endif
1239 } else {
1240 cpskip = dskip = sskip = macoffset + encoffset;
1241 dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
1242 cpoffset = cpskip + dtheend;
1243 coffset = 0;
1244 }
1245 ctx.pc_offset = htole16(coffset >> 2);
1246
1247 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &q->q_src_map)) {
1248 ubsecstats.hst_nomap++;
1249 err = ENOMEM;
1250 goto errout;
1251 }
1252 if (crp->crp_flags & CRYPTO_F_IMBUF) {
1253 if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
1254 q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
1255 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1256 q->q_src_map = NULL;
1257 ubsecstats.hst_noload++;
1258 err = ENOMEM;
1259 goto errout;
1260 }
1261 } else if (crp->crp_flags & CRYPTO_F_IOV) {
1262 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
1263 q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
1264 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1265 q->q_src_map = NULL;
1266 ubsecstats.hst_noload++;
1267 err = ENOMEM;
1268 goto errout;
1269 }
1270 }
1271 nicealign = ubsec_dmamap_aligned(&q->q_src);
1272
1273 dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);
1274
1275 #ifdef UBSEC_DEBUG
1276 if (ubsec_debug)
1277 printf("src skip: %d nicealign: %u\n", sskip, nicealign);
1278 #endif
1279 for (i = j = 0; i < q->q_src_nsegs; i++) {
1280 struct ubsec_pktbuf *pb;
1281 bus_size_t packl = q->q_src_segs[i].ds_len;
1282 bus_addr_t packp = q->q_src_segs[i].ds_addr;
1283
1284 if (sskip >= packl) {
1285 sskip -= packl;
1286 continue;
1287 }
1288
1289 packl -= sskip;
1290 packp += sskip;
1291 sskip = 0;
1292
1293 if (packl > 0xfffc) {
1294 err = EIO;
1295 goto errout;
1296 }
1297
1298 if (j == 0)
1299 pb = &dmap->d_dma->d_mcr.mcr_ipktbuf;
1300 else
1301 pb = &dmap->d_dma->d_sbuf[j - 1];
1302
1303 pb->pb_addr = htole32(packp);
1304
1305 if (stheend) {
1306 if (packl > stheend) {
1307 pb->pb_len = htole32(stheend);
1308 stheend = 0;
1309 } else {
1310 pb->pb_len = htole32(packl);
1311 stheend -= packl;
1312 }
1313 } else
1314 pb->pb_len = htole32(packl);
1315
1316 if ((i + 1) == q->q_src_nsegs)
1317 pb->pb_next = 0;
1318 else
1319 pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1320 offsetof(struct ubsec_dmachunk, d_sbuf[j]));
1321 j++;
1322 }
1323
1324 if (enccrd == NULL && maccrd != NULL) {
1325 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
1326 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
1327 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
1328 offsetof(struct ubsec_dmachunk, d_macbuf[0]));
1329 #ifdef UBSEC_DEBUG
1330 if (ubsec_debug)
1331 printf("opkt: %x %x %x\n",
1332 dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr,
1333 dmap->d_dma->d_mcr.mcr_opktbuf.pb_len,
1334 dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
1335 #endif
1336 } else {
1337 if (crp->crp_flags & CRYPTO_F_IOV) {
1338 if (!nicealign) {
1339 ubsecstats.hst_iovmisaligned++;
1340 err = EINVAL;
1341 goto errout;
1342 }
1343 if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
1344 &q->q_dst_map)) {
1345 ubsecstats.hst_nomap++;
1346 err = ENOMEM;
1347 goto errout;
1348 }
1349 if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
1350 q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) {
1351 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
1352 q->q_dst_map = NULL;
1353 ubsecstats.hst_noload++;
1354 err = ENOMEM;
1355 goto errout;
1356 }
1357 } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
1358 if (nicealign) {
1359 q->q_dst = q->q_src;
1360 } else {
1361 int totlen, len;
1362 struct mbuf *m, *top, **mp;
1363
1364 ubsecstats.hst_unaligned++;
1365 totlen = q->q_src_mapsize;
1366 if (totlen >= MINCLSIZE) {
1367 m = m_getcl(M_NOWAIT, MT_DATA,
1368 q->q_src_m->m_flags & M_PKTHDR);
1369 len = MCLBYTES;
1370 } else if (q->q_src_m->m_flags & M_PKTHDR) {
1371 m = m_gethdr(M_NOWAIT, MT_DATA);
1372 len = MHLEN;
1373 } else {
1374 m = m_get(M_NOWAIT, MT_DATA);
1375 len = MLEN;
1376 }
1377 if (m && q->q_src_m->m_flags & M_PKTHDR &&
1378 !m_dup_pkthdr(m, q->q_src_m, M_NOWAIT)) {
1379 m_free(m);
1380 m = NULL;
1381 }
1382 if (m == NULL) {
1383 ubsecstats.hst_nombuf++;
1384 err = sc->sc_nqueue ? ERESTART : ENOMEM;
1385 goto errout;
1386 }
1387 m->m_len = len = min(totlen, len);
1388 totlen -= len;
1389 top = m;
1390 				mp = &top;
1391
1392 while (totlen > 0) {
1393 if (totlen >= MINCLSIZE) {
1394 m = m_getcl(M_NOWAIT,
1395 MT_DATA, 0);
1396 len = MCLBYTES;
1397 } else {
1398 m = m_get(M_NOWAIT, MT_DATA);
1399 len = MLEN;
1400 }
1401 if (m == NULL) {
1402 m_freem(top);
1403 ubsecstats.hst_nombuf++;
1404 err = sc->sc_nqueue ? ERESTART : ENOMEM;
1405 goto errout;
1406 }
1407 m->m_len = len = min(totlen, len);
1408 totlen -= len;
1409 *mp = m;
1410 mp = &m->m_next;
1411 }
1412 q->q_dst_m = top;
1413 ubsec_mcopy(q->q_src_m, q->q_dst_m,
1414 cpskip, cpoffset);
1415 if (bus_dmamap_create(sc->sc_dmat,
1416 BUS_DMA_NOWAIT, &q->q_dst_map) != 0) {
1417 ubsecstats.hst_nomap++;
1418 err = ENOMEM;
1419 goto errout;
1420 }
1421 if (bus_dmamap_load_mbuf(sc->sc_dmat,
1422 q->q_dst_map, q->q_dst_m,
1423 ubsec_op_cb, &q->q_dst,
1424 BUS_DMA_NOWAIT) != 0) {
1425 bus_dmamap_destroy(sc->sc_dmat,
1426 q->q_dst_map);
1427 q->q_dst_map = NULL;
1428 ubsecstats.hst_noload++;
1429 err = ENOMEM;
1430 goto errout;
1431 }
1432 }
1433 } else {
1434 ubsecstats.hst_badflags++;
1435 err = EINVAL;
1436 goto errout;
1437 }
1438
1439 #ifdef UBSEC_DEBUG
1440 if (ubsec_debug)
1441 printf("dst skip: %d\n", dskip);
1442 #endif
1443 for (i = j = 0; i < q->q_dst_nsegs; i++) {
1444 struct ubsec_pktbuf *pb;
1445 bus_size_t packl = q->q_dst_segs[i].ds_len;
1446 bus_addr_t packp = q->q_dst_segs[i].ds_addr;
1447
1448 if (dskip >= packl) {
1449 dskip -= packl;
1450 continue;
1451 }
1452
1453 packl -= dskip;
1454 packp += dskip;
1455 dskip = 0;
1456
1457 if (packl > 0xfffc) {
1458 err = EIO;
1459 goto errout;
1460 }
1461
1462 if (j == 0)
1463 pb = &dmap->d_dma->d_mcr.mcr_opktbuf;
1464 else
1465 pb = &dmap->d_dma->d_dbuf[j - 1];
1466
1467 pb->pb_addr = htole32(packp);
1468
1469 if (dtheend) {
1470 if (packl > dtheend) {
1471 pb->pb_len = htole32(dtheend);
1472 dtheend = 0;
1473 } else {
1474 pb->pb_len = htole32(packl);
1475 dtheend -= packl;
1476 }
1477 } else
1478 pb->pb_len = htole32(packl);
1479
1480 if ((i + 1) == q->q_dst_nsegs) {
1481 if (maccrd)
1482 pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1483 offsetof(struct ubsec_dmachunk, d_macbuf[0]));
1484 else
1485 pb->pb_next = 0;
1486 } else
1487 pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
1488 offsetof(struct ubsec_dmachunk, d_dbuf[j]));
1489 j++;
1490 }
1491 }
1492
1493 dmap->d_dma->d_mcr.mcr_cmdctxp = htole32(dmap->d_alloc.dma_paddr +
1494 offsetof(struct ubsec_dmachunk, d_ctx));
1495
1496 if (sc->sc_flags & UBS_FLAGS_LONGCTX) {
1497 struct ubsec_pktctx_long *ctxl;
1498
1499 ctxl = (struct ubsec_pktctx_long *)(dmap->d_alloc.dma_vaddr +
1500 offsetof(struct ubsec_dmachunk, d_ctx));
1501
1502 /* transform small context into long context */
1503 ctxl->pc_len = htole16(sizeof(struct ubsec_pktctx_long));
1504 ctxl->pc_type = htole16(UBS_PKTCTX_TYPE_IPSEC);
1505 ctxl->pc_flags = ctx.pc_flags;
1506 ctxl->pc_offset = ctx.pc_offset;
1507 for (i = 0; i < 6; i++)
1508 ctxl->pc_deskey[i] = ctx.pc_deskey[i];
1509 for (i = 0; i < 5; i++)
1510 ctxl->pc_hminner[i] = ctx.pc_hminner[i];
1511 for (i = 0; i < 5; i++)
1512 ctxl->pc_hmouter[i] = ctx.pc_hmouter[i];
1513 ctxl->pc_iv[0] = ctx.pc_iv[0];
1514 ctxl->pc_iv[1] = ctx.pc_iv[1];
1515 } else
1516 bcopy(&ctx, dmap->d_alloc.dma_vaddr +
1517 offsetof(struct ubsec_dmachunk, d_ctx),
1518 sizeof(struct ubsec_pktctx));
1519
1520 mtx_lock(&sc->sc_mcr1lock);
1521 SIMPLEQ_INSERT_TAIL(&sc->sc_queue, q, q_next);
1522 sc->sc_nqueue++;
1523 ubsecstats.hst_ipackets++;
1524 ubsecstats.hst_ibytes += dmap->d_alloc.dma_size;
1525 if ((hint & CRYPTO_HINT_MORE) == 0 || sc->sc_nqueue >= UBS_MAX_AGGR)
1526 ubsec_feed(sc);
1527 mtx_unlock(&sc->sc_mcr1lock);
1528 return (0);
1529
1530 errout:
1531 if (q != NULL) {
1532 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
1533 m_freem(q->q_dst_m);
1534
1535 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
1536 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
1537 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
1538 }
1539 if (q->q_src_map != NULL) {
1540 bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
1541 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1542 }
1543 }
1544 if (q != NULL || err == ERESTART) {
1545 mtx_lock(&sc->sc_freeqlock);
1546 if (q != NULL)
1547 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
1548 if (err == ERESTART)
1549 sc->sc_needwakeup |= CRYPTO_SYMQ;
1550 mtx_unlock(&sc->sc_freeqlock);
1551 }
1552 if (err != ERESTART) {
1553 crp->crp_etype = err;
1554 crypto_done(crp);
1555 }
1556 return (err);
1557 }
1558
1559 static void
1560 ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
1561 {
1562 struct cryptop *crp = (struct cryptop *)q->q_crp;
1563 struct cryptodesc *crd;
1564 struct ubsec_dma *dmap = q->q_dma;
1565
1566 ubsecstats.hst_opackets++;
1567 ubsecstats.hst_obytes += dmap->d_alloc.dma_size;
1568
1569 ubsec_dma_sync(&dmap->d_alloc,
1570 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
1571 if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
1572 bus_dmamap_sync(sc->sc_dmat, q->q_dst_map,
1573 BUS_DMASYNC_POSTREAD);
1574 bus_dmamap_unload(sc->sc_dmat, q->q_dst_map);
1575 bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
1576 }
1577 bus_dmamap_sync(sc->sc_dmat, q->q_src_map, BUS_DMASYNC_POSTWRITE);
1578 bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
1579 bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
1580
1581 if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
1582 m_freem(q->q_src_m);
1583 crp->crp_buf = (caddr_t)q->q_dst_m;
1584 }
1585
1586 /* copy out IV for future use */
1587 if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
1588 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1589 if (crd->crd_alg != CRYPTO_DES_CBC &&
1590 crd->crd_alg != CRYPTO_3DES_CBC)
1591 continue;
1592 crypto_copydata(crp->crp_flags, crp->crp_buf,
1593 crd->crd_skip + crd->crd_len - 8, 8,
1594 (caddr_t)sc->sc_sessions[q->q_sesn].ses_iv);
1595 break;
1596 }
1597 }
1598
1599 for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
1600 if (crd->crd_alg != CRYPTO_MD5_HMAC &&
1601 crd->crd_alg != CRYPTO_SHA1_HMAC)
1602 continue;
1603 crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
1604 sc->sc_sessions[q->q_sesn].ses_mlen,
1605 (caddr_t)dmap->d_dma->d_macbuf);
1606 break;
1607 }
1608 mtx_lock(&sc->sc_freeqlock);
1609 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
1610 mtx_unlock(&sc->sc_freeqlock);
1611 crypto_done(crp);
1612 }
1613
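/*
 * Copy between mbuf chains, skipping the bytes in [hoffset, toffset):
 * only the head and tail of the packet are duplicated; the chip
 * writes the processed span into the destination chain itself.
 */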
1614 static void
1615 ubsec_mcopy(struct mbuf *srcm, struct mbuf *dstm, int hoffset, int toffset)
1616 {
1617 int i, j, dlen, slen;
1618 caddr_t dptr, sptr;
1619
1620 j = 0;
1621 sptr = srcm->m_data;
1622 slen = srcm->m_len;
1623 dptr = dstm->m_data;
1624 dlen = dstm->m_len;
1625
1626 while (1) {
1627 for (i = 0; i < min(slen, dlen); i++) {
1628 if (j < hoffset || j >= toffset)
1629 *dptr++ = *sptr++;
1630 slen--;
1631 dlen--;
1632 j++;
1633 }
1634 if (slen == 0) {
1635 srcm = srcm->m_next;
1636 if (srcm == NULL)
1637 return;
1638 sptr = srcm->m_data;
1639 slen = srcm->m_len;
1640 }
1641 if (dlen == 0) {
1642 dstm = dstm->m_next;
1643 if (dstm == NULL)
1644 return;
1645 dptr = dstm->m_data;
1646 dlen = dstm->m_len;
1647 }
1648 }
1649 }
1650
1651 /*
1652  * Feed the key generator; must be called at splimp() or higher.
1653 */
1654 static int
1655 ubsec_feed2(struct ubsec_softc *sc)
1656 {
1657 struct ubsec_q2 *q;
1658
1659 while (!SIMPLEQ_EMPTY(&sc->sc_queue2)) {
1660 if (READ_REG(sc, BS_STAT) & BS_STAT_MCR2_FULL)
1661 break;
1662 q = SIMPLEQ_FIRST(&sc->sc_queue2);
1663
1664 ubsec_dma_sync(&q->q_mcr,
1665 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1666 ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_PREWRITE);
1667
1668 WRITE_REG(sc, BS_MCR2, q->q_mcr.dma_paddr);
1669 SIMPLEQ_REMOVE_HEAD(&sc->sc_queue2, q_next);
1670 --sc->sc_nqueue2;
1671 SIMPLEQ_INSERT_TAIL(&sc->sc_qchip2, q, q_next);
1672 }
1673 return (0);
1674 }
1675
1676 /*
1677  * Callback for MCR2 completions (key setup and RNG operations)
1678 */
1679 static void
1680 ubsec_callback2(struct ubsec_softc *sc, struct ubsec_q2 *q)
1681 {
1682 struct cryptkop *krp;
1683 struct ubsec_ctx_keyop *ctx;
1684
1685 ctx = (struct ubsec_ctx_keyop *)q->q_ctx.dma_vaddr;
1686 ubsec_dma_sync(&q->q_ctx, BUS_DMASYNC_POSTWRITE);
1687
1688 switch (q->q_type) {
1689 #ifndef UBSEC_NO_RNG
1690 case UBS_CTXOP_RNGBYPASS: {
1691 struct ubsec_q2_rng *rng = (struct ubsec_q2_rng *)q;
1692
1693 ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_POSTREAD);
1694 (*sc->sc_harvest)(sc->sc_rndtest,
1695 rng->rng_buf.dma_vaddr,
1696 UBSEC_RNG_BUFSIZ*sizeof (u_int32_t));
1697 rng->rng_used = 0;
1698 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
1699 break;
1700 }
1701 #endif
1702 case UBS_CTXOP_MODEXP: {
1703 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
1704 u_int rlen, clen;
1705
1706 krp = me->me_krp;
1707 rlen = (me->me_modbits + 7) / 8;
1708 clen = (krp->krp_param[krp->krp_iparams].crp_nbits + 7) / 8;
1709
1710 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_POSTWRITE);
1711 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_POSTWRITE);
1712 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_POSTREAD);
1713 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_POSTWRITE);
1714
1715 if (clen < rlen)
1716 krp->krp_status = E2BIG;
1717 else {
1718 if (sc->sc_flags & UBS_FLAGS_HWNORM) {
1719 bzero(krp->krp_param[krp->krp_iparams].crp_p,
1720 (krp->krp_param[krp->krp_iparams].crp_nbits
1721 + 7) / 8);
1722 bcopy(me->me_C.dma_vaddr,
1723 krp->krp_param[krp->krp_iparams].crp_p,
1724 (me->me_modbits + 7) / 8);
1725 } else
1726 ubsec_kshift_l(me->me_shiftbits,
1727 me->me_C.dma_vaddr, me->me_normbits,
1728 krp->krp_param[krp->krp_iparams].crp_p,
1729 krp->krp_param[krp->krp_iparams].crp_nbits);
1730 }
1731
1732 crypto_kdone(krp);
1733
1734 /* bzero all potentially sensitive data */
1735 bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
1736 bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
1737 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
1738 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
1739
1740 /* Can't free here, so put us on the free list. */
1741 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &me->me_q, q_next);
1742 break;
1743 }
1744 case UBS_CTXOP_RSAPRIV: {
1745 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
1746 u_int len;
1747
1748 krp = rp->rpr_krp;
1749 ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_POSTWRITE);
1750 ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_POSTREAD);
1751
1752 len = (krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_nbits + 7) / 8;
1753 bcopy(rp->rpr_msgout.dma_vaddr,
1754 krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT].crp_p, len);
1755
1756 crypto_kdone(krp);
1757
1758 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
1759 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
1760 bzero(rp->rpr_q.q_ctx.dma_vaddr, rp->rpr_q.q_ctx.dma_size);
1761
1762 /* Can't free here, so put us on the free list. */
1763 SIMPLEQ_INSERT_TAIL(&sc->sc_q2free, &rp->rpr_q, q_next);
1764 break;
1765 }
1766 default:
1767 device_printf(sc->sc_dev, "unknown ctx op: %x\n",
1768 letoh16(ctx->ctx_op));
1769 break;
1770 }
1771 }
1772
1773 #ifndef UBSEC_NO_RNG
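/*
 * Periodic RNG harvest: post an RNGBYPASS MCR asking the chip to
 * deposit UBSEC_RNG_BUFSIZ words of raw RNG output into rng_buf.
 * Completion is handled in ubsec_callback2(), which feeds the data
 * to sc_harvest() and re-arms the callout.
 */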
1774 static void
1775 ubsec_rng(void *vsc)
1776 {
1777 struct ubsec_softc *sc = vsc;
1778 struct ubsec_q2_rng *rng = &sc->sc_rng;
1779 struct ubsec_mcr *mcr;
1780 struct ubsec_ctx_rngbypass *ctx;
1781
1782 mtx_lock(&sc->sc_mcr2lock);
1783 if (rng->rng_used) {
1784 mtx_unlock(&sc->sc_mcr2lock);
1785 return;
1786 }
1787 sc->sc_nqueue2++;
1788 if (sc->sc_nqueue2 >= UBS_MAX_NQUEUE)
1789 goto out;
1790
1791 mcr = (struct ubsec_mcr *)rng->rng_q.q_mcr.dma_vaddr;
1792 ctx = (struct ubsec_ctx_rngbypass *)rng->rng_q.q_ctx.dma_vaddr;
1793
1794 mcr->mcr_pkts = htole16(1);
1795 mcr->mcr_flags = 0;
1796 mcr->mcr_cmdctxp = htole32(rng->rng_q.q_ctx.dma_paddr);
1797 mcr->mcr_ipktbuf.pb_addr = mcr->mcr_ipktbuf.pb_next = 0;
1798 mcr->mcr_ipktbuf.pb_len = 0;
1799 mcr->mcr_reserved = mcr->mcr_pktlen = 0;
1800 mcr->mcr_opktbuf.pb_addr = htole32(rng->rng_buf.dma_paddr);
1801 mcr->mcr_opktbuf.pb_len = htole32(((sizeof(u_int32_t) * UBSEC_RNG_BUFSIZ)) &
1802 UBS_PKTBUF_LEN);
1803 mcr->mcr_opktbuf.pb_next = 0;
1804
1805 ctx->rbp_len = htole16(sizeof(struct ubsec_ctx_rngbypass));
1806 ctx->rbp_op = htole16(UBS_CTXOP_RNGBYPASS);
1807 rng->rng_q.q_type = UBS_CTXOP_RNGBYPASS;
1808
1809 ubsec_dma_sync(&rng->rng_buf, BUS_DMASYNC_PREREAD);
1810
1811 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rng->rng_q, q_next);
1812 rng->rng_used = 1;
1813 ubsec_feed2(sc);
1814 ubsecstats.hst_rng++;
1815 mtx_unlock(&sc->sc_mcr2lock);
1816
1817 return;
1818
1819 out:
1820 /*
1821 	 * Something weird happened; schedule our own callback.
1822 */
1823 sc->sc_nqueue2--;
1824 mtx_unlock(&sc->sc_mcr2lock);
1825 callout_reset(&sc->sc_rngto, sc->sc_rnghz, ubsec_rng, sc);
1826 }
1827 #endif /* UBSEC_NO_RNG */
1828
1829 static void
1830 ubsec_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1831 {
1832 bus_addr_t *paddr = (bus_addr_t*) arg;
1833 *paddr = segs->ds_addr;
1834 }
1835
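/*
 * Allocate a single contiguous, 32-bit addressable DMA buffer:
 * create a one-segment tag, allocate backing memory, then load the
 * map to learn the bus address (delivered through ubsec_dmamap_cb).
 * The fail_* labels unwind in reverse order on error.
 */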
1836 static int
1837 ubsec_dma_malloc(
1838 struct ubsec_softc *sc,
1839 bus_size_t size,
1840 struct ubsec_dma_alloc *dma,
1841 int mapflags
1842 )
1843 {
1844 int r;
1845
1846 /* XXX could specify sc_dmat as parent but that just adds overhead */
1847 r = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
1848 1, 0, /* alignment, bounds */
1849 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1850 BUS_SPACE_MAXADDR, /* highaddr */
1851 NULL, NULL, /* filter, filterarg */
1852 size, /* maxsize */
1853 1, /* nsegments */
1854 size, /* maxsegsize */
1855 BUS_DMA_ALLOCNOW, /* flags */
1856 NULL, NULL, /* lockfunc, lockarg */
1857 &dma->dma_tag);
1858 if (r != 0) {
1859 device_printf(sc->sc_dev, "ubsec_dma_malloc: "
1860 "bus_dma_tag_create failed; error %u\n", r);
1861 goto fail_1;
1862 }
1863
1864 r = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr,
1865 BUS_DMA_NOWAIT, &dma->dma_map);
1866 if (r != 0) {
1867 device_printf(sc->sc_dev, "ubsec_dma_malloc: "
1868 "bus_dmammem_alloc failed; size %ju, error %u\n",
1869 (intmax_t)size, r);
1870 goto fail_2;
1871 }
1872
1873 r = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr,
1874 size,
1875 ubsec_dmamap_cb,
1876 &dma->dma_paddr,
1877 mapflags | BUS_DMA_NOWAIT);
1878 if (r != 0) {
1879 device_printf(sc->sc_dev, "ubsec_dma_malloc: "
1880 "bus_dmamap_load failed; error %u\n", r);
1881 goto fail_3;
1882 }
1883
1884 dma->dma_size = size;
1885 return (0);
1886
1887 fail_3:
1888 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1889 fail_2:
1890 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1891 fail_1:
1892 bus_dma_tag_destroy(dma->dma_tag);
1893 dma->dma_tag = NULL;
1894 return (r);
1895 }
1896
1897 static void
1898 ubsec_dma_free(struct ubsec_softc *sc, struct ubsec_dma_alloc *dma)
1899 {
1900 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
1901 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
1902 bus_dma_tag_destroy(dma->dma_tag);
1903 }
1904
1905 /*
1906  * Resets the board.  Values in the registers are left as-is
1907 * from the reset (i.e. initial values are assigned elsewhere).
1908 */
1909 static void
1910 ubsec_reset_board(struct ubsec_softc *sc)
1911 {
1912 volatile u_int32_t ctrl;
1913
1914 ctrl = READ_REG(sc, BS_CTRL);
1915 ctrl |= BS_CTRL_RESET;
1916 WRITE_REG(sc, BS_CTRL, ctrl);
1917
1918 /*
1919 	 * Wait approx. 30 PCI clocks = 900 ns = 0.9 us
1920 */
1921 DELAY(10);
1922 }
1923
1924 /*
1925 * Init Broadcom registers
1926 */
1927 static void
1928 ubsec_init_board(struct ubsec_softc *sc)
1929 {
1930 u_int32_t ctrl;
1931
1932 ctrl = READ_REG(sc, BS_CTRL);
1933 ctrl &= ~(BS_CTRL_BE32 | BS_CTRL_BE64);
1934 ctrl |= BS_CTRL_LITTLE_ENDIAN | BS_CTRL_MCR1INT;
1935
1936 if (sc->sc_flags & (UBS_FLAGS_KEY|UBS_FLAGS_RNG))
1937 ctrl |= BS_CTRL_MCR2INT;
1938 else
1939 ctrl &= ~BS_CTRL_MCR2INT;
1940
1941 if (sc->sc_flags & UBS_FLAGS_HWNORM)
1942 ctrl &= ~BS_CTRL_SWNORM;
1943
1944 WRITE_REG(sc, BS_CTRL, ctrl);
1945 }
1946
1947 /*
1948 * Init Broadcom PCI registers
1949 */
1950 static void
1951 ubsec_init_pciregs(device_t dev)
1952 {
1953 #if 0
1954 u_int32_t misc;
1955
1956 misc = pci_conf_read(pc, pa->pa_tag, BS_RTY_TOUT);
1957 misc = (misc & ~(UBS_PCI_RTY_MASK << UBS_PCI_RTY_SHIFT))
1958 | ((UBS_DEF_RTY & 0xff) << UBS_PCI_RTY_SHIFT);
1959 misc = (misc & ~(UBS_PCI_TOUT_MASK << UBS_PCI_TOUT_SHIFT))
1960 | ((UBS_DEF_TOUT & 0xff) << UBS_PCI_TOUT_SHIFT);
1961 pci_conf_write(pc, pa->pa_tag, BS_RTY_TOUT, misc);
1962 #endif
1963
1964 /*
1965 	 * Set the cache line size to 1; this forces the BCM58xx
1966 	 * chip to do burst read/writes only, since cache line
1967 	 * read/writes are too slow.
1968 */
1969 pci_write_config(dev, PCIR_CACHELNSZ, UBS_DEF_CACHELINE, 1);
1970 }
1971
1972 /*
1973 * Clean up after a chip crash.
1974  * It is assumed that the caller is in splimp().
1975 */
1976 static void
1977 ubsec_cleanchip(struct ubsec_softc *sc)
1978 {
1979 struct ubsec_q *q;
1980
1981 while (!SIMPLEQ_EMPTY(&sc->sc_qchip)) {
1982 q = SIMPLEQ_FIRST(&sc->sc_qchip);
1983 SIMPLEQ_REMOVE_HEAD(&sc->sc_qchip, q_next);
1984 ubsec_free_q(sc, q);
1985 }
1986 sc->sc_nqchip = 0;
1987 }
1988
1989 /*
1990  * Free a ubsec_q.
1991 * It is assumed that the caller is within splimp().
1992 */
1993 static int
1994 ubsec_free_q(struct ubsec_softc *sc, struct ubsec_q *q)
1995 {
1996 struct ubsec_q *q2;
1997 struct cryptop *crp;
1998 int npkts;
1999 int i;
2000
2001 npkts = q->q_nstacked_mcrs;
2002
2003 for (i = 0; i < npkts; i++) {
2004 		if (q->q_stacked_mcr[i]) {
2005 q2 = q->q_stacked_mcr[i];
2006
2007 if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
2008 m_freem(q2->q_dst_m);
2009
2010 crp = (struct cryptop *)q2->q_crp;
2011
2012 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q2, q_next);
2013
2014 crp->crp_etype = EFAULT;
2015 crypto_done(crp);
2016 } else {
2017 break;
2018 }
2019 }
2020
2021 /*
2022 * Free header MCR
2023 */
2024 if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
2025 m_freem(q->q_dst_m);
2026
2027 crp = (struct cryptop *)q->q_crp;
2028
2029 SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
2030
2031 crp->crp_etype = EFAULT;
2032 crypto_done(crp);
2033 	return (0);
2034 }
2035
2036 /*
2037 * Routine to reset the chip and clean up.
2038 * It is assumed that the caller is in splimp()
2039 */
2040 static void
2041 ubsec_totalreset(struct ubsec_softc *sc)
2042 {
2043 ubsec_reset_board(sc);
2044 ubsec_init_board(sc);
2045 ubsec_cleanchip(sc);
2046 }
2047
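/*
 * Check whether a loaded map meets the chip's DMA constraints: every
 * segment must start on a 4-byte boundary and all but the last must
 * be a multiple of 4 bytes long.  Returns non-zero if aligned.
 */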
2048 static int
2049 ubsec_dmamap_aligned(struct ubsec_operand *op)
2050 {
2051 int i;
2052
2053 for (i = 0; i < op->nsegs; i++) {
2054 if (op->segs[i].ds_addr & 3)
2055 return (0);
2056 if ((i != (op->nsegs - 1)) &&
2057 (op->segs[i].ds_len & 3))
2058 return (0);
2059 }
2060 return (1);
2061 }
2062
2063 static void
2064 ubsec_kfree(struct ubsec_softc *sc, struct ubsec_q2 *q)
2065 {
2066 switch (q->q_type) {
2067 case UBS_CTXOP_MODEXP: {
2068 struct ubsec_q2_modexp *me = (struct ubsec_q2_modexp *)q;
2069
2070 ubsec_dma_free(sc, &me->me_q.q_mcr);
2071 ubsec_dma_free(sc, &me->me_q.q_ctx);
2072 ubsec_dma_free(sc, &me->me_M);
2073 ubsec_dma_free(sc, &me->me_E);
2074 ubsec_dma_free(sc, &me->me_C);
2075 ubsec_dma_free(sc, &me->me_epb);
2076 free(me, M_DEVBUF);
2077 break;
2078 }
2079 case UBS_CTXOP_RSAPRIV: {
2080 struct ubsec_q2_rsapriv *rp = (struct ubsec_q2_rsapriv *)q;
2081
2082 ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
2083 ubsec_dma_free(sc, &rp->rpr_q.q_ctx);
2084 ubsec_dma_free(sc, &rp->rpr_msgin);
2085 ubsec_dma_free(sc, &rp->rpr_msgout);
2086 free(rp, M_DEVBUF);
2087 break;
2088 }
2089 default:
2090 device_printf(sc->sc_dev, "invalid kfree 0x%x\n", q->q_type);
2091 break;
2092 }
2093 }
2094
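/*
 * Dispatch an asymmetric (public-key) operation.  Completed requests
 * parked on sc_q2free are reclaimed first, then the new request is
 * routed by opcode.
 */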
2095 static int
2096 ubsec_kprocess(device_t dev, struct cryptkop *krp, int hint)
2097 {
2098 struct ubsec_softc *sc = device_get_softc(dev);
2099 int r;
2100
2101 if (krp == NULL || krp->krp_callback == NULL)
2102 return (EINVAL);
2103
2104 while (!SIMPLEQ_EMPTY(&sc->sc_q2free)) {
2105 struct ubsec_q2 *q;
2106
2107 q = SIMPLEQ_FIRST(&sc->sc_q2free);
2108 SIMPLEQ_REMOVE_HEAD(&sc->sc_q2free, q_next);
2109 ubsec_kfree(sc, q);
2110 }
2111
2112 switch (krp->krp_op) {
2113 case CRK_MOD_EXP:
2114 if (sc->sc_flags & UBS_FLAGS_HWNORM)
2115 r = ubsec_kprocess_modexp_hw(sc, krp, hint);
2116 else
2117 r = ubsec_kprocess_modexp_sw(sc, krp, hint);
2118 break;
2119 case CRK_MOD_EXP_CRT:
2120 return (ubsec_kprocess_rsapriv(sc, krp, hint));
2121 default:
2122 device_printf(sc->sc_dev, "kprocess: invalid op 0x%x\n",
2123 krp->krp_op);
2124 krp->krp_status = EOPNOTSUPP;
2125 crypto_kdone(krp);
2126 return (0);
2127 }
2128 return (0); /* silence compiler */
2129 }
2130
2131 /*
2132 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (sw normalization)
2133 */
2134 static int
2135 ubsec_kprocess_modexp_sw(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
2136 {
2137 struct ubsec_q2_modexp *me;
2138 struct ubsec_mcr *mcr;
2139 struct ubsec_ctx_modexp *ctx;
2140 struct ubsec_pktbuf *epb;
2141 int err = 0;
2142 u_int nbits, normbits, mbits, shiftbits, ebits;
2143
2144 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
2145 if (me == NULL) {
2146 err = ENOMEM;
2147 goto errout;
2148 }
2149 bzero(me, sizeof *me);
2150 me->me_krp = krp;
2151 me->me_q.q_type = UBS_CTXOP_MODEXP;
2152
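	/*
	 * Round the modulus size up to the nearest operand width the
	 * chip supports: 512, 768 or 1024 bits, plus 1536 and 2048
	 * bits on parts with UBS_FLAGS_BIGKEY.
	 */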
2153 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
2154 if (nbits <= 512)
2155 normbits = 512;
2156 else if (nbits <= 768)
2157 normbits = 768;
2158 else if (nbits <= 1024)
2159 normbits = 1024;
2160 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
2161 normbits = 1536;
2162 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
2163 normbits = 2048;
2164 else {
2165 err = E2BIG;
2166 goto errout;
2167 }
2168
2169 shiftbits = normbits - nbits;
2170
2171 me->me_modbits = nbits;
2172 me->me_shiftbits = shiftbits;
2173 me->me_normbits = normbits;
2174
2175 /* Sanity check: result bits must be >= true modulus bits. */
2176 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
2177 err = ERANGE;
2178 goto errout;
2179 }
2180
2181 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2182 &me->me_q.q_mcr, 0)) {
2183 err = ENOMEM;
2184 goto errout;
2185 }
2186 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
2187
2188 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
2189 &me->me_q.q_ctx, 0)) {
2190 err = ENOMEM;
2191 goto errout;
2192 }
2193
2194 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
2195 if (mbits > nbits) {
2196 err = E2BIG;
2197 goto errout;
2198 }
2199 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
2200 err = ENOMEM;
2201 goto errout;
2202 }
2203 ubsec_kshift_r(shiftbits,
2204 krp->krp_param[UBS_MODEXP_PAR_M].crp_p, mbits,
2205 me->me_M.dma_vaddr, normbits);
2206
2207 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
2208 err = ENOMEM;
2209 goto errout;
2210 }
2211 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2212
2213 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
2214 if (ebits > nbits) {
2215 err = E2BIG;
2216 goto errout;
2217 }
2218 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
2219 err = ENOMEM;
2220 goto errout;
2221 }
2222 ubsec_kshift_r(shiftbits,
2223 krp->krp_param[UBS_MODEXP_PAR_E].crp_p, ebits,
2224 me->me_E.dma_vaddr, normbits);
2225
2226 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
2227 &me->me_epb, 0)) {
2228 err = ENOMEM;
2229 goto errout;
2230 }
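	/*
	 * The exponent travels as a second input fragment:
	 * mcr_ipktbuf holds M and chains to this pktbuf, which points
	 * at E.
	 */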
2231 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
2232 epb->pb_addr = htole32(me->me_E.dma_paddr);
2233 epb->pb_next = 0;
2234 epb->pb_len = htole32(normbits / 8);
2235
2236 #ifdef UBSEC_DEBUG
2237 if (ubsec_debug) {
2238 printf("Epb ");
2239 ubsec_dump_pb(epb);
2240 }
2241 #endif
2242
2243 mcr->mcr_pkts = htole16(1);
2244 mcr->mcr_flags = 0;
2245 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
2246 mcr->mcr_reserved = 0;
2247 mcr->mcr_pktlen = 0;
2248
2249 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
2250 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
2251 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
2252
2253 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
2254 mcr->mcr_opktbuf.pb_next = 0;
2255 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);
2256
2257 #ifdef DIAGNOSTIC
2258 /* Misaligned output buffer will hang the chip. */
2259 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
2260 panic("%s: modexp invalid addr 0x%x\n",
2261 device_get_nameunit(sc->sc_dev),
2262 letoh32(mcr->mcr_opktbuf.pb_addr));
2263 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
2264 panic("%s: modexp invalid len 0x%x\n",
2265 device_get_nameunit(sc->sc_dev),
2266 letoh32(mcr->mcr_opktbuf.pb_len));
2267 #endif
2268
2269 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
2270 bzero(ctx, sizeof(*ctx));
2271 ubsec_kshift_r(shiftbits,
2272 krp->krp_param[UBS_MODEXP_PAR_N].crp_p, nbits,
2273 ctx->me_N, normbits);
2274 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
2275 ctx->me_op = htole16(UBS_CTXOP_MODEXP);
2276 ctx->me_E_len = htole16(nbits);
2277 ctx->me_N_len = htole16(nbits);
2278
2279 #ifdef UBSEC_DEBUG
2280 if (ubsec_debug) {
2281 ubsec_dump_mcr(mcr);
2282 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
2283 }
2284 #endif
2285
2286 /*
2287 	 * ubsec_feed2 will sync mcr and ctx; we just need to sync
2288 * everything else.
2289 */
2290 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
2291 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
2292 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
2293 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);
2294
2295 /* Enqueue and we're done... */
2296 mtx_lock(&sc->sc_mcr2lock);
2297 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
2298 ubsec_feed2(sc);
2299 ubsecstats.hst_modexp++;
2300 mtx_unlock(&sc->sc_mcr2lock);
2301
2302 return (0);
2303
2304 errout:
2305 if (me != NULL) {
2306 if (me->me_q.q_mcr.dma_tag != NULL)
2307 ubsec_dma_free(sc, &me->me_q.q_mcr);
2308 if (me->me_q.q_ctx.dma_tag != NULL) {
2309 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
2310 ubsec_dma_free(sc, &me->me_q.q_ctx);
2311 }
2312 if (me->me_M.dma_tag != NULL) {
2313 bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
2314 ubsec_dma_free(sc, &me->me_M);
2315 }
2316 if (me->me_E.dma_tag != NULL) {
2317 bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
2318 ubsec_dma_free(sc, &me->me_E);
2319 }
2320 if (me->me_C.dma_tag != NULL) {
2321 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2322 ubsec_dma_free(sc, &me->me_C);
2323 }
2324 if (me->me_epb.dma_tag != NULL)
2325 ubsec_dma_free(sc, &me->me_epb);
2326 free(me, M_DEVBUF);
2327 }
2328 krp->krp_status = err;
2329 crypto_kdone(krp);
2330 return (0);
2331 }
2332
2333 /*
2334 * Start computation of cr[C] = (cr[M] ^ cr[E]) mod cr[N] (hw normalization)
2335 */
2336 static int
2337 ubsec_kprocess_modexp_hw(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
2338 {
2339 struct ubsec_q2_modexp *me;
2340 struct ubsec_mcr *mcr;
2341 struct ubsec_ctx_modexp *ctx;
2342 struct ubsec_pktbuf *epb;
2343 int err = 0;
2344 u_int nbits, normbits, mbits, shiftbits, ebits;
2345
2346 me = (struct ubsec_q2_modexp *)malloc(sizeof *me, M_DEVBUF, M_NOWAIT);
2347 if (me == NULL) {
2348 err = ENOMEM;
2349 goto errout;
2350 }
2351 bzero(me, sizeof *me);
2352 me->me_krp = krp;
2353 me->me_q.q_type = UBS_CTXOP_MODEXP;
2354
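	/*
	 * With hardware normalization the chip shifts the operands
	 * itself, so M, E and N are copied in unshifted below (the
	 * _sw variant pre-shifts them with ubsec_kshift_r instead).
	 */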
2355 nbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_N]);
2356 if (nbits <= 512)
2357 normbits = 512;
2358 else if (nbits <= 768)
2359 normbits = 768;
2360 else if (nbits <= 1024)
2361 normbits = 1024;
2362 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 1536)
2363 normbits = 1536;
2364 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && nbits <= 2048)
2365 normbits = 2048;
2366 else {
2367 err = E2BIG;
2368 goto errout;
2369 }
2370
2371 shiftbits = normbits - nbits;
2372
2373 /* XXX ??? */
2374 me->me_modbits = nbits;
2375 me->me_shiftbits = shiftbits;
2376 me->me_normbits = normbits;
2377
2378 /* Sanity check: result bits must be >= true modulus bits. */
2379 if (krp->krp_param[krp->krp_iparams].crp_nbits < nbits) {
2380 err = ERANGE;
2381 goto errout;
2382 }
2383
2384 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2385 &me->me_q.q_mcr, 0)) {
2386 err = ENOMEM;
2387 goto errout;
2388 }
2389 mcr = (struct ubsec_mcr *)me->me_q.q_mcr.dma_vaddr;
2390
2391 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_modexp),
2392 &me->me_q.q_ctx, 0)) {
2393 err = ENOMEM;
2394 goto errout;
2395 }
2396
2397 mbits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_M]);
2398 if (mbits > nbits) {
2399 err = E2BIG;
2400 goto errout;
2401 }
2402 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_M, 0)) {
2403 err = ENOMEM;
2404 goto errout;
2405 }
2406 bzero(me->me_M.dma_vaddr, normbits / 8);
2407 bcopy(krp->krp_param[UBS_MODEXP_PAR_M].crp_p,
2408 me->me_M.dma_vaddr, (mbits + 7) / 8);
2409
2410 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_C, 0)) {
2411 err = ENOMEM;
2412 goto errout;
2413 }
2414 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2415
2416 ebits = ubsec_ksigbits(&krp->krp_param[UBS_MODEXP_PAR_E]);
2417 if (ebits > nbits) {
2418 err = E2BIG;
2419 goto errout;
2420 }
2421 if (ubsec_dma_malloc(sc, normbits / 8, &me->me_E, 0)) {
2422 err = ENOMEM;
2423 goto errout;
2424 }
2425 bzero(me->me_E.dma_vaddr, normbits / 8);
2426 bcopy(krp->krp_param[UBS_MODEXP_PAR_E].crp_p,
2427 me->me_E.dma_vaddr, (ebits + 7) / 8);
2428
2429 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_pktbuf),
2430 &me->me_epb, 0)) {
2431 err = ENOMEM;
2432 goto errout;
2433 }
2434 epb = (struct ubsec_pktbuf *)me->me_epb.dma_vaddr;
2435 epb->pb_addr = htole32(me->me_E.dma_paddr);
2436 epb->pb_next = 0;
2437 epb->pb_len = htole32((ebits + 7) / 8);
2438
2439 #ifdef UBSEC_DEBUG
2440 if (ubsec_debug) {
2441 printf("Epb ");
2442 ubsec_dump_pb(epb);
2443 }
2444 #endif
2445
2446 mcr->mcr_pkts = htole16(1);
2447 mcr->mcr_flags = 0;
2448 mcr->mcr_cmdctxp = htole32(me->me_q.q_ctx.dma_paddr);
2449 mcr->mcr_reserved = 0;
2450 mcr->mcr_pktlen = 0;
2451
2452 mcr->mcr_ipktbuf.pb_addr = htole32(me->me_M.dma_paddr);
2453 mcr->mcr_ipktbuf.pb_len = htole32(normbits / 8);
2454 mcr->mcr_ipktbuf.pb_next = htole32(me->me_epb.dma_paddr);
2455
2456 mcr->mcr_opktbuf.pb_addr = htole32(me->me_C.dma_paddr);
2457 mcr->mcr_opktbuf.pb_next = 0;
2458 mcr->mcr_opktbuf.pb_len = htole32(normbits / 8);
2459
2460 #ifdef DIAGNOSTIC
2461 /* Misaligned output buffer will hang the chip. */
2462 if ((letoh32(mcr->mcr_opktbuf.pb_addr) & 3) != 0)
2463 panic("%s: modexp invalid addr 0x%x\n",
2464 device_get_nameunit(sc->sc_dev),
2465 letoh32(mcr->mcr_opktbuf.pb_addr));
2466 if ((letoh32(mcr->mcr_opktbuf.pb_len) & 3) != 0)
2467 panic("%s: modexp invalid len 0x%x\n",
2468 device_get_nameunit(sc->sc_dev),
2469 letoh32(mcr->mcr_opktbuf.pb_len));
2470 #endif
2471
2472 ctx = (struct ubsec_ctx_modexp *)me->me_q.q_ctx.dma_vaddr;
2473 bzero(ctx, sizeof(*ctx));
2474 bcopy(krp->krp_param[UBS_MODEXP_PAR_N].crp_p, ctx->me_N,
2475 (nbits + 7) / 8);
2476 ctx->me_len = htole16((normbits / 8) + (4 * sizeof(u_int16_t)));
2477 ctx->me_op = htole16(UBS_CTXOP_MODEXP);
2478 ctx->me_E_len = htole16(ebits);
2479 ctx->me_N_len = htole16(nbits);
2480
2481 #ifdef UBSEC_DEBUG
2482 if (ubsec_debug) {
2483 ubsec_dump_mcr(mcr);
2484 ubsec_dump_ctx2((struct ubsec_ctx_keyop *)ctx);
2485 }
2486 #endif
2487
2488 /*
2489 	 * ubsec_feed2 will sync mcr and ctx; we just need to sync
2490 * everything else.
2491 */
2492 ubsec_dma_sync(&me->me_M, BUS_DMASYNC_PREWRITE);
2493 ubsec_dma_sync(&me->me_E, BUS_DMASYNC_PREWRITE);
2494 ubsec_dma_sync(&me->me_C, BUS_DMASYNC_PREREAD);
2495 ubsec_dma_sync(&me->me_epb, BUS_DMASYNC_PREWRITE);
2496
2497 /* Enqueue and we're done... */
2498 mtx_lock(&sc->sc_mcr2lock);
2499 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &me->me_q, q_next);
2500 ubsec_feed2(sc);
2501 mtx_unlock(&sc->sc_mcr2lock);
2502
2503 return (0);
2504
2505 errout:
2506 if (me != NULL) {
2507 if (me->me_q.q_mcr.dma_tag != NULL)
2508 ubsec_dma_free(sc, &me->me_q.q_mcr);
2509 if (me->me_q.q_ctx.dma_tag != NULL) {
2510 bzero(me->me_q.q_ctx.dma_vaddr, me->me_q.q_ctx.dma_size);
2511 ubsec_dma_free(sc, &me->me_q.q_ctx);
2512 }
2513 if (me->me_M.dma_tag != NULL) {
2514 bzero(me->me_M.dma_vaddr, me->me_M.dma_size);
2515 ubsec_dma_free(sc, &me->me_M);
2516 }
2517 if (me->me_E.dma_tag != NULL) {
2518 bzero(me->me_E.dma_vaddr, me->me_E.dma_size);
2519 ubsec_dma_free(sc, &me->me_E);
2520 }
2521 if (me->me_C.dma_tag != NULL) {
2522 bzero(me->me_C.dma_vaddr, me->me_C.dma_size);
2523 ubsec_dma_free(sc, &me->me_C);
2524 }
2525 if (me->me_epb.dma_tag != NULL)
2526 ubsec_dma_free(sc, &me->me_epb);
2527 free(me, M_DEVBUF);
2528 }
2529 krp->krp_status = err;
2530 crypto_kdone(krp);
2531 return (0);
2532 }
2533
2534 static int
2535 ubsec_kprocess_rsapriv(struct ubsec_softc *sc, struct cryptkop *krp, int hint)
2536 {
2537 struct ubsec_q2_rsapriv *rp = NULL;
2538 struct ubsec_mcr *mcr;
2539 struct ubsec_ctx_rsapriv *ctx;
2540 int err = 0;
2541 u_int padlen, msglen;
2542
2543 msglen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_P]);
2544 padlen = ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_Q]);
2545 if (msglen > padlen)
2546 padlen = msglen;
2547
2548 if (padlen <= 256)
2549 padlen = 256;
2550 else if (padlen <= 384)
2551 padlen = 384;
2552 else if (padlen <= 512)
2553 padlen = 512;
2554 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 768)
2555 padlen = 768;
2556 else if (sc->sc_flags & UBS_FLAGS_BIGKEY && padlen <= 1024)
2557 padlen = 1024;
2558 else {
2559 err = E2BIG;
2560 goto errout;
2561 }
2562
2563 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DP]) > padlen) {
2564 err = E2BIG;
2565 goto errout;
2566 }
2567
2568 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_DQ]) > padlen) {
2569 err = E2BIG;
2570 goto errout;
2571 }
2572
2573 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_PINV]) > padlen) {
2574 err = E2BIG;
2575 goto errout;
2576 }
2577
2578 rp = (struct ubsec_q2_rsapriv *)malloc(sizeof *rp, M_DEVBUF, M_NOWAIT);
2579 if (rp == NULL)
2580 return (ENOMEM);
2581 bzero(rp, sizeof *rp);
2582 rp->rpr_krp = krp;
2583 rp->rpr_q.q_type = UBS_CTXOP_RSAPRIV;
2584
2585 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
2586 &rp->rpr_q.q_mcr, 0)) {
2587 err = ENOMEM;
2588 goto errout;
2589 }
2590 mcr = (struct ubsec_mcr *)rp->rpr_q.q_mcr.dma_vaddr;
2591
2592 if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rsapriv),
2593 &rp->rpr_q.q_ctx, 0)) {
2594 err = ENOMEM;
2595 goto errout;
2596 }
2597 ctx = (struct ubsec_ctx_rsapriv *)rp->rpr_q.q_ctx.dma_vaddr;
2598 bzero(ctx, sizeof *ctx);
2599
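	/*
	 * The context carries the five CRT parameters back to back,
	 * each padded out to padlen bits: p, q, dp, dq and pinv.
	 */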
2600 /* Copy in p */
2601 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_P].crp_p,
2602 &ctx->rpr_buf[0 * (padlen / 8)],
2603 (krp->krp_param[UBS_RSAPRIV_PAR_P].crp_nbits + 7) / 8);
2604
2605 /* Copy in q */
2606 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_p,
2607 &ctx->rpr_buf[1 * (padlen / 8)],
2608 (krp->krp_param[UBS_RSAPRIV_PAR_Q].crp_nbits + 7) / 8);
2609
2610 /* Copy in dp */
2611 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_p,
2612 &ctx->rpr_buf[2 * (padlen / 8)],
2613 (krp->krp_param[UBS_RSAPRIV_PAR_DP].crp_nbits + 7) / 8);
2614
2615 /* Copy in dq */
2616 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_p,
2617 &ctx->rpr_buf[3 * (padlen / 8)],
2618 (krp->krp_param[UBS_RSAPRIV_PAR_DQ].crp_nbits + 7) / 8);
2619
2620 /* Copy in pinv */
2621 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_p,
2622 &ctx->rpr_buf[4 * (padlen / 8)],
2623 (krp->krp_param[UBS_RSAPRIV_PAR_PINV].crp_nbits + 7) / 8);
2624
2625 msglen = padlen * 2;
2626
2627 /* Copy in input message (aligned buffer/length). */
2628 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGIN]) > msglen) {
2629 /* Is this likely? */
2630 err = E2BIG;
2631 goto errout;
2632 }
2633 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgin, 0)) {
2634 err = ENOMEM;
2635 goto errout;
2636 }
2637 bzero(rp->rpr_msgin.dma_vaddr, (msglen + 7) / 8);
2638 bcopy(krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_p,
2639 rp->rpr_msgin.dma_vaddr,
2640 (krp->krp_param[UBS_RSAPRIV_PAR_MSGIN].crp_nbits + 7) / 8);
2641
2642 /* Prepare space for output message (aligned buffer/length). */
2643 if (ubsec_ksigbits(&krp->krp_param[UBS_RSAPRIV_PAR_MSGOUT]) < msglen) {
2644 /* Is this likely? */
2645 err = E2BIG;
2646 goto errout;
2647 }
2648 if (ubsec_dma_malloc(sc, (msglen + 7) / 8, &rp->rpr_msgout, 0)) {
2649 err = ENOMEM;
2650 goto errout;
2651 }
2652 bzero(rp->rpr_msgout.dma_vaddr, (msglen + 7) / 8);
2653
2654 mcr->mcr_pkts = htole16(1);
2655 mcr->mcr_flags = 0;
2656 mcr->mcr_cmdctxp = htole32(rp->rpr_q.q_ctx.dma_paddr);
2657 mcr->mcr_ipktbuf.pb_addr = htole32(rp->rpr_msgin.dma_paddr);
2658 mcr->mcr_ipktbuf.pb_next = 0;
2659 mcr->mcr_ipktbuf.pb_len = htole32(rp->rpr_msgin.dma_size);
2660 mcr->mcr_reserved = 0;
2661 mcr->mcr_pktlen = htole16(msglen);
2662 mcr->mcr_opktbuf.pb_addr = htole32(rp->rpr_msgout.dma_paddr);
2663 mcr->mcr_opktbuf.pb_next = 0;
2664 mcr->mcr_opktbuf.pb_len = htole32(rp->rpr_msgout.dma_size);
2665
2666 #ifdef DIAGNOSTIC
2667 if (rp->rpr_msgin.dma_paddr & 3 || rp->rpr_msgin.dma_size & 3) {
2668 panic("%s: rsapriv: invalid msgin %x(0x%jx)",
2669 device_get_nameunit(sc->sc_dev),
2670 rp->rpr_msgin.dma_paddr, (uintmax_t)rp->rpr_msgin.dma_size);
2671 }
2672 if (rp->rpr_msgout.dma_paddr & 3 || rp->rpr_msgout.dma_size & 3) {
2673 panic("%s: rsapriv: invalid msgout %x(0x%jx)",
2674 device_get_nameunit(sc->sc_dev),
2675 rp->rpr_msgout.dma_paddr, (uintmax_t)rp->rpr_msgout.dma_size);
2676 }
2677 #endif
2678
2679 ctx->rpr_len = (sizeof(u_int16_t) * 4) + (5 * (padlen / 8));
2680 ctx->rpr_op = htole16(UBS_CTXOP_RSAPRIV);
2681 ctx->rpr_q_len = htole16(padlen);
2682 ctx->rpr_p_len = htole16(padlen);
2683
2684 /*
2685 	 * ubsec_feed2 will sync mcr and ctx; we just need to sync
2686 * everything else.
2687 */
2688 ubsec_dma_sync(&rp->rpr_msgin, BUS_DMASYNC_PREWRITE);
2689 ubsec_dma_sync(&rp->rpr_msgout, BUS_DMASYNC_PREREAD);
2690
2691 /* Enqueue and we're done... */
2692 mtx_lock(&sc->sc_mcr2lock);
2693 SIMPLEQ_INSERT_TAIL(&sc->sc_queue2, &rp->rpr_q, q_next);
2694 ubsec_feed2(sc);
2695 ubsecstats.hst_modexpcrt++;
2696 mtx_unlock(&sc->sc_mcr2lock);
2697 return (0);
2698
2699 errout:
2700 if (rp != NULL) {
2701 if (rp->rpr_q.q_mcr.dma_tag != NULL)
2702 ubsec_dma_free(sc, &rp->rpr_q.q_mcr);
2703 if (rp->rpr_msgin.dma_tag != NULL) {
2704 bzero(rp->rpr_msgin.dma_vaddr, rp->rpr_msgin.dma_size);
2705 ubsec_dma_free(sc, &rp->rpr_msgin);
2706 }
2707 if (rp->rpr_msgout.dma_tag != NULL) {
2708 bzero(rp->rpr_msgout.dma_vaddr, rp->rpr_msgout.dma_size);
2709 ubsec_dma_free(sc, &rp->rpr_msgout);
2710 }
2711 free(rp, M_DEVBUF);
2712 }
2713 krp->krp_status = err;
2714 crypto_kdone(krp);
2715 return (0);
2716 }
2717
2718 #ifdef UBSEC_DEBUG
2719 static void
2720 ubsec_dump_pb(volatile struct ubsec_pktbuf *pb)
2721 {
2722 printf("addr 0x%x (0x%x) next 0x%x\n",
2723 pb->pb_addr, pb->pb_len, pb->pb_next);
2724 }
2725
2726 static void
2727 ubsec_dump_ctx2(struct ubsec_ctx_keyop *c)
2728 {
2729 	printf("CTX (0x%x):\n", letoh16(c->ctx_len));
2730 switch (letoh16(c->ctx_op)) {
2731 case UBS_CTXOP_RNGBYPASS:
2732 case UBS_CTXOP_RNGSHA1:
2733 break;
2734 case UBS_CTXOP_MODEXP:
2735 {
2736 struct ubsec_ctx_modexp *cx = (void *)c;
2737 int i, len;
2738
2739 printf(" Elen %u, Nlen %u\n",
2740 letoh16(cx->me_E_len), letoh16(cx->me_N_len));
2741 len = (cx->me_N_len + 7)/8;
2742 for (i = 0; i < len; i++)
2743 printf("%s%02x", (i == 0) ? " N: " : ":", cx->me_N[i]);
2744 printf("\n");
2745 break;
2746 }
2747 default:
2748 		printf("unknown context: %x\n", letoh16(c->ctx_op));
2749 }
2750 printf("END CTX\n");
2751 }
2752
2753 static void
2754 ubsec_dump_mcr(struct ubsec_mcr *mcr)
2755 {
2756 volatile struct ubsec_mcr_add *ma;
2757 int i;
2758
2759 printf("MCR:\n");
2760 printf(" pkts: %u, flags 0x%x\n",
2761 letoh16(mcr->mcr_pkts), letoh16(mcr->mcr_flags));
2762 ma = (volatile struct ubsec_mcr_add *)&mcr->mcr_cmdctxp;
2763 for (i = 0; i < letoh16(mcr->mcr_pkts); i++) {
2764 printf(" %d: ctx 0x%x len 0x%x rsvd 0x%x\n", i,
2765 letoh32(ma->mcr_cmdctxp), letoh16(ma->mcr_pktlen),
2766 letoh16(ma->mcr_reserved));
2767 printf(" %d: ipkt ", i);
2768 ubsec_dump_pb(&ma->mcr_ipktbuf);
2769 printf(" %d: opkt ", i);
2770 ubsec_dump_pb(&ma->mcr_opktbuf);
2771 ma++;
2772 }
2773 printf("END MCR\n");
2774 }
2775 #endif /* UBSEC_DEBUG */
2776
2777 /*
2778 * Return the number of significant bits of a big number.
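 * The crparam byte array is little-endian (least significant byte
 * first), so the scan starts at the highest index; e.g. {0x34, 0x02}
 * represents 0x0234 and yields 10 significant bits.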
2779 */
2780 static int
2781 ubsec_ksigbits(struct crparam *cr)
2782 {
2783 u_int plen = (cr->crp_nbits + 7) / 8;
2784 int i, sig = plen * 8;
2785 u_int8_t c, *p = cr->crp_p;
2786
2787 for (i = plen - 1; i >= 0; i--) {
2788 c = p[i];
2789 if (c != 0) {
2790 while ((c & 0x80) == 0) {
2791 sig--;
2792 c <<= 1;
2793 }
2794 break;
2795 }
2796 sig -= 8;
2797 }
2798 return (sig);
2799 }
2800
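/*
 * Shift a little-endian big number left (toward more significant
 * bytes) by shiftbits while widening it from srcbits to dstbits;
 * used to justify operands into the chip's normalized width.
 */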
2801 static void
2802 ubsec_kshift_r(
2803 u_int shiftbits,
2804 u_int8_t *src, u_int srcbits,
2805 u_int8_t *dst, u_int dstbits)
2806 {
2807 u_int slen, dlen;
2808 int i, si, di, n;
2809
2810 slen = (srcbits + 7) / 8;
2811 dlen = (dstbits + 7) / 8;
2812
2813 for (i = 0; i < slen; i++)
2814 dst[i] = src[i];
2815 for (i = 0; i < dlen - slen; i++)
2816 dst[slen + i] = 0;
2817
2818 n = shiftbits / 8;
2819 if (n != 0) {
2820 si = dlen - n - 1;
2821 di = dlen - 1;
2822 while (si >= 0)
2823 dst[di--] = dst[si--];
2824 while (di >= 0)
2825 dst[di--] = 0;
2826 }
2827
2828 n = shiftbits % 8;
2829 if (n != 0) {
2830 for (i = dlen - 1; i > 0; i--)
2831 dst[i] = (dst[i] << n) |
2832 (dst[i - 1] >> (8 - n));
2833 dst[0] = dst[0] << n;
2834 }
2835 }
2836
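/*
 * Inverse of ubsec_kshift_r: shift a little-endian big number right
 * by shiftbits (toward less significant bytes), discarding the bits
 * shifted out.
 */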
2837 static void
2838 ubsec_kshift_l(
2839 u_int shiftbits,
2840 u_int8_t *src, u_int srcbits,
2841 u_int8_t *dst, u_int dstbits)
2842 {
2843 int slen, dlen, i, n;
2844
2845 slen = (srcbits + 7) / 8;
2846 dlen = (dstbits + 7) / 8;
2847
2848 n = shiftbits / 8;
2849 for (i = 0; i < slen; i++)
2850 dst[i] = src[i + n];
2851 for (i = 0; i < dlen - slen; i++)
2852 dst[slen + i] = 0;
2853
2854 n = shiftbits % 8;
2855 if (n != 0) {
2856 for (i = 0; i < (dlen - 1); i++)
2857 dst[i] = (dst[i] >> n) | (dst[i + 1] << (8 - n));
2858 dst[dlen - 1] = dst[dlen - 1] >> n;
2859 }
2860 }