1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 2020, 2021 Rubicon Communications, LLC (Netgate)
5 * Copyright (c) 2021 The FreeBSD Foundation
6 *
7 * Portions of this software were developed by Ararat River
8 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31 #include <sys/cdefs.h>
32 __FBSDID("$FreeBSD$");
33
34 #include <sys/param.h>
35 #include <sys/bus.h>
36 #include <sys/counter.h>
37 #include <sys/endian.h>
38 #include <sys/kernel.h>
39 #include <sys/lock.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/mutex.h>
43 #include <sys/rman.h>
44 #include <sys/smp.h>
45 #include <sys/sglist.h>
46 #include <sys/sysctl.h>
47
48 #include <machine/atomic.h>
49 #include <machine/bus.h>
50
51 #include <crypto/rijndael/rijndael.h>
52 #include <opencrypto/cryptodev.h>
53 #include <opencrypto/xform.h>
54
55 #include <dev/ofw/ofw_bus.h>
56 #include <dev/ofw/ofw_bus_subr.h>
57
58 #include "cryptodev_if.h"
59
60 #include "safexcel_reg.h"
61 #include "safexcel_var.h"
62
63 /*
64 * We only support the EIP97 for now.
65 */
66 static struct ofw_compat_data safexcel_compat[] = {
67 { "inside-secure,safexcel-eip97ies", (uintptr_t)97 },
68 { "inside-secure,safexcel-eip97", (uintptr_t)97 },
69 { NULL, 0 }
70 };
71
72 const struct safexcel_reg_offsets eip97_regs_offset = {
73 .hia_aic = SAFEXCEL_EIP97_HIA_AIC_BASE,
74 .hia_aic_g = SAFEXCEL_EIP97_HIA_AIC_G_BASE,
75 .hia_aic_r = SAFEXCEL_EIP97_HIA_AIC_R_BASE,
76 .hia_aic_xdr = SAFEXCEL_EIP97_HIA_AIC_xDR_BASE,
77 .hia_dfe = SAFEXCEL_EIP97_HIA_DFE_BASE,
78 .hia_dfe_thr = SAFEXCEL_EIP97_HIA_DFE_THR_BASE,
79 .hia_dse = SAFEXCEL_EIP97_HIA_DSE_BASE,
80 .hia_dse_thr = SAFEXCEL_EIP97_HIA_DSE_THR_BASE,
81 .hia_gen_cfg = SAFEXCEL_EIP97_HIA_GEN_CFG_BASE,
82 .pe = SAFEXCEL_EIP97_PE_BASE,
83 };
84
85 const struct safexcel_reg_offsets eip197_regs_offset = {
86 .hia_aic = SAFEXCEL_EIP197_HIA_AIC_BASE,
87 .hia_aic_g = SAFEXCEL_EIP197_HIA_AIC_G_BASE,
88 .hia_aic_r = SAFEXCEL_EIP197_HIA_AIC_R_BASE,
89 .hia_aic_xdr = SAFEXCEL_EIP197_HIA_AIC_xDR_BASE,
90 .hia_dfe = SAFEXCEL_EIP197_HIA_DFE_BASE,
91 .hia_dfe_thr = SAFEXCEL_EIP197_HIA_DFE_THR_BASE,
92 .hia_dse = SAFEXCEL_EIP197_HIA_DSE_BASE,
93 .hia_dse_thr = SAFEXCEL_EIP197_HIA_DSE_THR_BASE,
94 .hia_gen_cfg = SAFEXCEL_EIP197_HIA_GEN_CFG_BASE,
95 .pe = SAFEXCEL_EIP197_PE_BASE,
96 };
97
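/*
 * Return the request at the command ring's read index, i.e., the oldest
 * request still awaiting completion processing.
 */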
98 static struct safexcel_request *
99 safexcel_next_request(struct safexcel_ring *ring)
100 {
101 int i;
102
103 i = ring->cdr.read;
104 KASSERT(i >= 0 && i < SAFEXCEL_RING_SIZE,
105 ("%s: out of bounds request index %d", __func__, i));
106 return (&ring->requests[i]);
107 }
108
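/*
 * Consume the command descriptor at the ring's read index, or return NULL if
 * the ring is empty.
 */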
109 static struct safexcel_cmd_descr *
110 safexcel_cmd_descr_next(struct safexcel_cmd_descr_ring *ring)
111 {
112 struct safexcel_cmd_descr *cdesc;
113
114 if (ring->write == ring->read)
115 return (NULL);
116 cdesc = &ring->desc[ring->read];
117 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
118 return (cdesc);
119 }
120
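/*
 * Consume the result descriptor at the ring's read index, or return NULL if
 * the ring is empty.
 */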
121 static struct safexcel_res_descr *
122 safexcel_res_descr_next(struct safexcel_res_descr_ring *ring)
123 {
124 struct safexcel_res_descr *rdesc;
125
126 if (ring->write == ring->read)
127 return (NULL);
128 rdesc = &ring->desc[ring->read];
129 ring->read = (ring->read + 1) % SAFEXCEL_RING_SIZE;
130 return (rdesc);
131 }
132
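/*
 * Reserve the request slot at the command ring's write index, or return NULL
 * if the ring is full.
 */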
133 static struct safexcel_request *
134 safexcel_alloc_request(struct safexcel_softc *sc, struct safexcel_ring *ring)
135 {
136 int i;
137
138 mtx_assert(&ring->mtx, MA_OWNED);
139
140 i = ring->cdr.write;
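	/*
	 * One slot is always left unused so that a full ring can be
	 * distinguished from an empty one (write == read).
	 */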
141 if ((i + 1) % SAFEXCEL_RING_SIZE == ring->cdr.read)
142 return (NULL);
143 return (&ring->requests[i]);
144 }
145
146 static void
147 safexcel_free_request(struct safexcel_ring *ring, struct safexcel_request *req)
148 {
149 struct safexcel_context_record *ctx;
150
151 mtx_assert(&ring->mtx, MA_OWNED);
152
153 if (req->dmap_loaded) {
154 bus_dmamap_unload(ring->data_dtag, req->dmap);
155 req->dmap_loaded = false;
156 }
157 ctx = (struct safexcel_context_record *)req->ctx.vaddr;
158 explicit_bzero(ctx->data, sizeof(ctx->data));
159 explicit_bzero(req->iv, sizeof(req->iv));
160 }
161
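/*
 * Process completed requests on a result descriptor ring: reclaim their
 * command and result descriptors, record any errors, and hand the finished
 * operations back to the opencrypto framework.
 */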
162 static void
163 safexcel_rdr_intr(struct safexcel_softc *sc, int ringidx)
164 {
165 TAILQ_HEAD(, cryptop) cq;
166 struct cryptop *crp, *tmp;
167 struct safexcel_cmd_descr *cdesc __diagused;
168 struct safexcel_res_descr *rdesc;
169 struct safexcel_request *req;
170 struct safexcel_ring *ring;
171 uint32_t blocked, error, i, nrdescs, nreqs;
172
173 blocked = 0;
174 ring = &sc->sc_ring[ringidx];
175
176 nreqs = SAFEXCEL_READ(sc,
177 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT);
178 nreqs >>= SAFEXCEL_xDR_PROC_xD_PKT_OFFSET;
179 nreqs &= SAFEXCEL_xDR_PROC_xD_PKT_MASK;
180 if (nreqs == 0) {
181 SAFEXCEL_DPRINTF(sc, 1,
182 "zero pending requests on ring %d\n", ringidx);
183 mtx_lock(&ring->mtx);
184 goto out;
185 }
186
187 TAILQ_INIT(&cq);
188
190 bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
191 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
192 bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
193 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
194 bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
195 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
196
197 nrdescs = 0;
198 for (i = 0; i < nreqs; i++) {
199 req = safexcel_next_request(ring);
200
201 bus_dmamap_sync(req->ctx.tag, req->ctx.map,
202 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
203 bus_dmamap_sync(ring->data_dtag, req->dmap,
204 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
205
206 while (req->cdescs-- > 0) {
207 cdesc = safexcel_cmd_descr_next(&ring->cdr);
208 KASSERT(cdesc != NULL,
209 ("%s: missing control descriptor", __func__));
210 if (req->cdescs == 0)
211 KASSERT(cdesc->last_seg,
212 ("%s: chain is not terminated", __func__));
213 }
214 nrdescs += req->rdescs;
215 while (req->rdescs-- > 0) {
216 rdesc = safexcel_res_descr_next(&ring->rdr);
217 error = rdesc->result_data.error_code;
218 if (error != 0) {
219 if (error == SAFEXCEL_RESULT_ERR_AUTH_FAILED &&
220 req->crp->crp_etype == 0) {
221 req->crp->crp_etype = EBADMSG;
222 } else {
223 SAFEXCEL_DPRINTF(sc, 1,
224 "error code %#x\n", error);
225 req->crp->crp_etype = EIO;
226 }
227 }
228 }
229
230 TAILQ_INSERT_TAIL(&cq, req->crp, crp_next);
231 }
232
233 mtx_lock(&ring->mtx);
234 if (nreqs != 0) {
235 KASSERT(ring->queued >= nreqs,
236 ("%s: request count underflow, %d queued %d completed",
237 __func__, ring->queued, nreqs));
238 ring->queued -= nreqs;
239
240 SAFEXCEL_WRITE(sc,
241 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PROC_COUNT,
242 SAFEXCEL_xDR_PROC_xD_PKT(nreqs) |
243 (sc->sc_config.rd_offset * nrdescs * sizeof(uint32_t)));
244 blocked = ring->blocked;
245 ring->blocked = 0;
246 }
247 out:
248 if (ring->queued != 0) {
249 SAFEXCEL_WRITE(sc,
250 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
251 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | imin(ring->queued, 16));
252 }
253 mtx_unlock(&ring->mtx);
254
255 if (blocked)
256 crypto_unblock(sc->sc_cid, blocked);
257
258 TAILQ_FOREACH_SAFE(crp, &cq, crp_next, tmp)
259 crypto_done(crp);
260 }
261
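/*
 * Ring interrupt handler.  Acknowledge any pending command and result ring
 * interrupts, and process completions if the result ring made progress
 * without reporting an error.
 */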
262 static void
263 safexcel_ring_intr(void *arg)
264 {
265 struct safexcel_softc *sc;
266 struct safexcel_intr_handle *ih;
267 uint32_t status, stat;
268 int ring;
269 bool rdrpending;
270
271 ih = arg;
272 sc = ih->sc;
273 ring = ih->ring;
274
275 status = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
276 SAFEXCEL_HIA_AIC_R_ENABLED_STAT(ring));
277 /* CDR interrupts */
278 if (status & SAFEXCEL_CDR_IRQ(ring)) {
279 stat = SAFEXCEL_READ(sc,
280 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
281 SAFEXCEL_WRITE(sc,
282 SAFEXCEL_HIA_CDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
283 stat & SAFEXCEL_CDR_INTR_MASK);
284 }
285 /* RDR interrupts */
286 rdrpending = false;
287 if (status & SAFEXCEL_RDR_IRQ(ring)) {
288 stat = SAFEXCEL_READ(sc,
289 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT);
290 if ((stat & SAFEXCEL_xDR_ERR) == 0)
291 rdrpending = true;
292 SAFEXCEL_WRITE(sc,
293 SAFEXCEL_HIA_RDR(sc, ring) + SAFEXCEL_HIA_xDR_STAT,
294 stat & SAFEXCEL_RDR_INTR_MASK);
295 }
296 SAFEXCEL_WRITE(sc,
297 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ACK(ring),
298 status);
299
300 if (rdrpending)
301 safexcel_rdr_intr(sc, ring);
302 }
303
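/*
 * Probe the hardware configuration: count the usable ring interrupt
 * controllers, rings and processing engines, and derive the aligned sizes of
 * the command, result and token descriptor slots.
 */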
304 static int
305 safexcel_configure(struct safexcel_softc *sc)
306 {
307 uint32_t i, mask, pemask, reg;
308
309 if (sc->sc_type == 197) {
310 sc->sc_offsets = eip197_regs_offset;
311 pemask = SAFEXCEL_N_PES_MASK;
312 } else {
313 sc->sc_offsets = eip97_regs_offset;
314 pemask = EIP97_N_PES_MASK;
315 }
316
317 /* Scan for valid ring interrupt controllers. */
318 for (i = 0; i < SAFEXCEL_MAX_RING_AIC; i++) {
319 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_R(sc) +
320 SAFEXCEL_HIA_AIC_R_VERSION(i));
321 if (SAFEXCEL_REG_LO16(reg) != EIP201_VERSION_LE)
322 break;
323 }
324 sc->sc_config.aic_rings = i;
325 if (sc->sc_config.aic_rings == 0)
326 return (-1);
327
328 reg = SAFEXCEL_READ(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_OPTIONS);
	/* Check for 64-bit addressing. */
330 if ((reg & SAFEXCEL_OPT_ADDR_64) == 0)
331 return (-1);
332 /* Check alignment constraints (which we do not support). */
333 if (((reg & SAFEXCEL_OPT_TGT_ALIGN_MASK) >>
334 SAFEXCEL_OPT_TGT_ALIGN_OFFSET) != 0)
335 return (-1);
336
337 sc->sc_config.hdw =
338 (reg & SAFEXCEL_xDR_HDW_MASK) >> SAFEXCEL_xDR_HDW_OFFSET;
339 mask = (1 << sc->sc_config.hdw) - 1;
340
341 sc->sc_config.rings = reg & SAFEXCEL_N_RINGS_MASK;
	/* Limit the number of rings to the number of ring AICs. */
343 sc->sc_config.rings = MIN(sc->sc_config.rings, sc->sc_config.aic_rings);
344
345 sc->sc_config.pes = (reg & pemask) >> SAFEXCEL_N_PES_OFFSET;
346
347 sc->sc_config.cd_size =
348 sizeof(struct safexcel_cmd_descr) / sizeof(uint32_t);
349 sc->sc_config.cd_offset = (sc->sc_config.cd_size + mask) & ~mask;
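	/*
	 * For example, with hdw == 2 (mask == 3), a 10-word command
	 * descriptor would occupy a 12-word slot so that each descriptor
	 * starts on a host interface data-width boundary.
	 */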
350
351 sc->sc_config.rd_size =
352 sizeof(struct safexcel_res_descr) / sizeof(uint32_t);
353 sc->sc_config.rd_offset = (sc->sc_config.rd_size + mask) & ~mask;
354
355 sc->sc_config.atok_offset =
356 (SAFEXCEL_MAX_ATOKENS * sizeof(struct safexcel_instr) + mask) &
357 ~mask;
358
359 return (0);
360 }
361
362 static void
363 safexcel_init_hia_bus_access(struct safexcel_softc *sc)
364 {
365 uint32_t version, val;
366
367 /* Determine endianness and configure byte swap. */
368 version = SAFEXCEL_READ(sc,
369 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_VERSION);
371 if (SAFEXCEL_REG_HI16(version) == SAFEXCEL_HIA_VERSION_BE) {
372 val = SAFEXCEL_READ(sc,
373 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL);
374 val = val ^ (SAFEXCEL_MST_CTRL_NO_BYTE_SWAP >> 24);
375 SAFEXCEL_WRITE(sc,
376 SAFEXCEL_HIA_AIC(sc) + SAFEXCEL_HIA_MST_CTRL,
377 val);
378 }
379
380 /* Configure wr/rd cache values. */
381 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_GEN_CFG(sc) + SAFEXCEL_HIA_MST_CTRL,
382 SAFEXCEL_MST_CTRL_RD_CACHE(RD_CACHE_4BITS) |
383 SAFEXCEL_MST_CTRL_WD_CACHE(WR_CACHE_4BITS));
384 }
385
386 static void
387 safexcel_disable_global_interrupts(struct safexcel_softc *sc)
388 {
389 /* Disable and clear pending interrupts. */
390 SAFEXCEL_WRITE(sc,
391 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ENABLE_CTRL, 0);
392 SAFEXCEL_WRITE(sc,
393 SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
394 SAFEXCEL_AIC_G_ACK_ALL_MASK);
395 }
396
397 /*
398 * Configure the data fetch engine. This component parses command descriptors
399 * and sets up DMA transfers from host memory to the corresponding processing
400 * engine.
401 */
402 static void
403 safexcel_configure_dfe_engine(struct safexcel_softc *sc, int pe)
404 {
405 /* Reset all DFE threads. */
406 SAFEXCEL_WRITE(sc,
407 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
408 SAFEXCEL_DxE_THR_CTRL_RESET_PE);
409
410 /* Deassert the DFE reset. */
411 SAFEXCEL_WRITE(sc,
412 SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe), 0);
413
414 /* DMA transfer size to use. */
415 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE(sc) + SAFEXCEL_HIA_DFE_CFG(pe),
416 SAFEXCEL_HIA_DFE_CFG_DIS_DEBUG |
417 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(6) |
418 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(9) |
419 SAFEXCEL_HIA_DxE_CFG_MIN_CTRL_SIZE(6) |
420 SAFEXCEL_HIA_DxE_CFG_MAX_CTRL_SIZE(7) |
421 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(RD_CACHE_3BITS) |
422 SAFEXCEL_HIA_DxE_CFG_CTRL_CACHE_CTRL(RD_CACHE_3BITS));
423
424 /* Configure the PE DMA transfer thresholds. */
425 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_DBUF_THRES(pe),
426 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
427 SAFEXCEL_PE_IN_xBUF_THRES_MAX(9));
428 SAFEXCEL_WRITE(sc, SAFEXCEL_PE(sc) + SAFEXCEL_PE_IN_TBUF_THRES(pe),
429 SAFEXCEL_PE_IN_xBUF_THRES_MIN(6) |
430 SAFEXCEL_PE_IN_xBUF_THRES_MAX(7));
431 }
432
433 /*
434 * Configure the data store engine. This component parses result descriptors
435 * and sets up DMA transfers from the processing engine to host memory.
436 */
437 static int
438 safexcel_configure_dse(struct safexcel_softc *sc, int pe)
439 {
440 uint32_t val;
441 int count;
442
443 /* Disable and reset all DSE threads. */
444 SAFEXCEL_WRITE(sc,
445 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
446 SAFEXCEL_DxE_THR_CTRL_RESET_PE);
447
	/* Wait up to one second for the threads to go idle. */
449 for (count = 0;;) {
450 val = SAFEXCEL_READ(sc,
451 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_STAT(pe));
452 if ((val & SAFEXCEL_DSE_THR_RDR_ID_MASK) ==
453 SAFEXCEL_DSE_THR_RDR_ID_MASK)
454 break;
455 if (count++ > 10000) {
456 device_printf(sc->sc_dev, "DSE reset timeout\n");
457 return (-1);
458 }
459 DELAY(100);
460 }
461
462 /* Exit the reset state. */
463 SAFEXCEL_WRITE(sc,
464 SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe), 0);
465
	/* DMA transfer size to use. */
467 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE(sc) + SAFEXCEL_HIA_DSE_CFG(pe),
468 SAFEXCEL_HIA_DSE_CFG_DIS_DEBUG |
469 SAFEXCEL_HIA_DxE_CFG_MIN_DATA_SIZE(7) |
470 SAFEXCEL_HIA_DxE_CFG_MAX_DATA_SIZE(8) |
471 SAFEXCEL_HIA_DxE_CFG_DATA_CACHE_CTRL(WR_CACHE_3BITS) |
472 SAFEXCEL_HIA_DSE_CFG_ALLWAYS_BUFFERABLE);
473
	/* Configure the processing engine thresholds. */
475 SAFEXCEL_WRITE(sc,
476 SAFEXCEL_PE(sc) + SAFEXCEL_PE_OUT_DBUF_THRES(pe),
477 SAFEXCEL_PE_OUT_DBUF_THRES_MIN(7) |
478 SAFEXCEL_PE_OUT_DBUF_THRES_MAX(8));
479
480 return (0);
481 }
482
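/*
 * Bring the command and result descriptor rings to a clean initial state:
 * interrupts cleared, prepared/processed counters zeroed, and ring sizes
 * programmed.
 */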
483 static void
484 safexcel_hw_prepare_rings(struct safexcel_softc *sc)
485 {
486 int i;
487
488 for (i = 0; i < sc->sc_config.rings; i++) {
489 /*
490 * Command descriptors.
491 */
492
493 /* Clear interrupts for this ring. */
494 SAFEXCEL_WRITE(sc,
495 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
496 SAFEXCEL_HIA_AIC_R_ENABLE_CLR_ALL_MASK);
497
498 /* Disable external triggering. */
499 SAFEXCEL_WRITE(sc,
500 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
501
502 /* Clear the pending prepared counter. */
503 SAFEXCEL_WRITE(sc,
504 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
505 SAFEXCEL_xDR_PREP_CLR_COUNT);
506
507 /* Clear the pending processed counter. */
508 SAFEXCEL_WRITE(sc,
509 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
510 SAFEXCEL_xDR_PROC_CLR_COUNT);
511
512 SAFEXCEL_WRITE(sc,
513 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
514 SAFEXCEL_WRITE(sc,
515 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
516
517 SAFEXCEL_WRITE(sc,
518 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
519 SAFEXCEL_RING_SIZE * sc->sc_config.cd_offset *
520 sizeof(uint32_t));
521
522 /*
523 * Result descriptors.
524 */
525
526 /* Disable external triggering. */
527 SAFEXCEL_WRITE(sc,
528 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG, 0);
529
530 /* Clear the pending prepared counter. */
531 SAFEXCEL_WRITE(sc,
532 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
533 SAFEXCEL_xDR_PREP_CLR_COUNT);
534
535 /* Clear the pending processed counter. */
536 SAFEXCEL_WRITE(sc,
537 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
538 SAFEXCEL_xDR_PROC_CLR_COUNT);
539
540 SAFEXCEL_WRITE(sc,
541 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
542 SAFEXCEL_WRITE(sc,
543 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
544
545 /* Ring size. */
546 SAFEXCEL_WRITE(sc,
547 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE,
548 SAFEXCEL_RING_SIZE * sc->sc_config.rd_offset *
549 sizeof(uint32_t));
550 }
551 }
552
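/*
 * Program each ring's base address, descriptor layout and fetch thresholds,
 * and enable result ring interrupts.
 */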
553 static void
554 safexcel_hw_setup_rings(struct safexcel_softc *sc)
555 {
556 struct safexcel_ring *ring;
557 uint32_t cd_size_rnd, mask, rd_size_rnd, val;
558 int i;
559
560 mask = (1 << sc->sc_config.hdw) - 1;
561 cd_size_rnd = (sc->sc_config.cd_size + mask) >> sc->sc_config.hdw;
562 val = (sizeof(struct safexcel_res_descr) -
563 sizeof(struct safexcel_res_data)) / sizeof(uint32_t);
564 rd_size_rnd = (val + mask) >> sc->sc_config.hdw;
565
566 for (i = 0; i < sc->sc_config.rings; i++) {
567 ring = &sc->sc_ring[i];
568
569 /*
570 * Command descriptors.
571 */
572
573 /* Ring base address. */
574 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
575 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
576 SAFEXCEL_ADDR_LO(ring->cdr.dma.paddr));
577 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
578 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
579 SAFEXCEL_ADDR_HI(ring->cdr.dma.paddr));
580
581 SAFEXCEL_WRITE(sc,
582 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
583 SAFEXCEL_xDR_DESC_MODE_64BIT | SAFEXCEL_CDR_DESC_MODE_ADCP |
584 (sc->sc_config.cd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
585 sc->sc_config.cd_size);
586
587 SAFEXCEL_WRITE(sc,
588 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
589 ((SAFEXCEL_FETCH_COUNT * (cd_size_rnd << sc->sc_config.hdw)) <<
590 SAFEXCEL_xDR_xD_FETCH_THRESH) |
591 (SAFEXCEL_FETCH_COUNT * sc->sc_config.cd_offset));
592
593 /* Configure DMA tx control. */
594 SAFEXCEL_WRITE(sc,
595 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
596 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
597 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS));
598
599 /* Clear any pending interrupt. */
600 SAFEXCEL_WRITE(sc,
601 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
602 SAFEXCEL_CDR_INTR_MASK);
603
604 /*
605 * Result descriptors.
606 */
607
608 /* Ring base address. */
609 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
610 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO,
611 SAFEXCEL_ADDR_LO(ring->rdr.dma.paddr));
612 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
613 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI,
614 SAFEXCEL_ADDR_HI(ring->rdr.dma.paddr));
615
616 SAFEXCEL_WRITE(sc,
617 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DESC_SIZE,
618 SAFEXCEL_xDR_DESC_MODE_64BIT |
619 (sc->sc_config.rd_offset << SAFEXCEL_xDR_DESC_xD_OFFSET) |
620 sc->sc_config.rd_size);
621
622 SAFEXCEL_WRITE(sc,
623 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_CFG,
624 ((SAFEXCEL_FETCH_COUNT * (rd_size_rnd << sc->sc_config.hdw)) <<
625 SAFEXCEL_xDR_xD_FETCH_THRESH) |
626 (SAFEXCEL_FETCH_COUNT * sc->sc_config.rd_offset));
627
628 /* Configure DMA tx control. */
629 SAFEXCEL_WRITE(sc,
630 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_DMA_CFG,
631 SAFEXCEL_HIA_xDR_CFG_WR_CACHE(WR_CACHE_3BITS) |
632 SAFEXCEL_HIA_xDR_CFG_RD_CACHE(RD_CACHE_3BITS) |
633 SAFEXCEL_HIA_xDR_WR_RES_BUF | SAFEXCEL_HIA_xDR_WR_CTRL_BUF);
634
635 /* Clear any pending interrupt. */
636 SAFEXCEL_WRITE(sc,
637 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
638 SAFEXCEL_RDR_INTR_MASK);
639
640 /* Enable ring interrupt. */
641 SAFEXCEL_WRITE(sc,
642 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CTRL(i),
643 SAFEXCEL_RDR_IRQ(i));
644 }
645 }
646
647 /* Reset the command and result descriptor rings. */
648 static void
649 safexcel_hw_reset_rings(struct safexcel_softc *sc)
650 {
651 int i;
652
653 for (i = 0; i < sc->sc_config.rings; i++) {
654 /*
655 * Result descriptor ring operations.
656 */
657
658 /* Reset ring base address. */
659 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
660 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
661 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_RDR(sc, i) +
662 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
663
664 /* Clear the pending prepared counter. */
665 SAFEXCEL_WRITE(sc,
666 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
667 SAFEXCEL_xDR_PREP_CLR_COUNT);
668
669 /* Clear the pending processed counter. */
670 SAFEXCEL_WRITE(sc,
671 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
672 SAFEXCEL_xDR_PROC_CLR_COUNT);
673
674 SAFEXCEL_WRITE(sc,
675 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
676 SAFEXCEL_WRITE(sc,
677 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
678
679 SAFEXCEL_WRITE(sc,
680 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
681
682 /* Clear any pending interrupt. */
683 SAFEXCEL_WRITE(sc,
684 SAFEXCEL_HIA_RDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
685 SAFEXCEL_RDR_INTR_MASK);
686
687 /* Disable ring interrupt. */
688 SAFEXCEL_WRITE(sc,
689 SAFEXCEL_HIA_AIC_R(sc) + SAFEXCEL_HIA_AIC_R_ENABLE_CLR(i),
690 SAFEXCEL_RDR_IRQ(i));
691
692 /*
693 * Command descriptor ring operations.
694 */
695
696 /* Reset ring base address. */
697 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
698 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_LO, 0);
699 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_CDR(sc, i) +
700 SAFEXCEL_HIA_xDR_RING_BASE_ADDR_HI, 0);
701
702 /* Clear the pending prepared counter. */
703 SAFEXCEL_WRITE(sc,
704 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_COUNT,
705 SAFEXCEL_xDR_PREP_CLR_COUNT);
706
707 /* Clear the pending processed counter. */
708 SAFEXCEL_WRITE(sc,
709 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_COUNT,
710 SAFEXCEL_xDR_PROC_CLR_COUNT);
711
712 SAFEXCEL_WRITE(sc,
713 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PREP_PNTR, 0);
714 SAFEXCEL_WRITE(sc,
715 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_PROC_PNTR, 0);
716
717 SAFEXCEL_WRITE(sc,
718 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_RING_SIZE, 0);
719
720 /* Clear any pending interrupt. */
721 SAFEXCEL_WRITE(sc,
722 SAFEXCEL_HIA_CDR(sc, i) + SAFEXCEL_HIA_xDR_STAT,
723 SAFEXCEL_CDR_INTR_MASK);
724 }
725 }
726
727 static void
728 safexcel_enable_pe_engine(struct safexcel_softc *sc, int pe)
729 {
730 int i, ring_mask;
731
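	/* Build a mask with one bit set for each available ring. */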
732 for (ring_mask = 0, i = 0; i < sc->sc_config.rings; i++) {
733 ring_mask <<= 1;
734 ring_mask |= 1;
735 }
736
737 /* Enable command descriptor rings. */
738 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DFE_THR(sc) + SAFEXCEL_HIA_DFE_THR_CTRL(pe),
739 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
740
741 /* Enable result descriptor rings. */
742 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_DSE_THR(sc) + SAFEXCEL_HIA_DSE_THR_CTRL(pe),
743 SAFEXCEL_DxE_THR_CTRL_EN | ring_mask);
744
745 /* Clear any HIA interrupt. */
746 SAFEXCEL_WRITE(sc, SAFEXCEL_HIA_AIC_G(sc) + SAFEXCEL_HIA_AIC_G_ACK,
747 SAFEXCEL_AIC_G_ACK_HIA_MASK);
748 }
749
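/*
 * Submit a prepared request to the hardware.  When the crypto framework
 * signals that more requests will follow immediately (CRYPTO_HINT_MORE), the
 * request's descriptor counts are only accumulated; the batch is pushed to
 * the prepared-count registers once a request arrives without the hint,
 * saving register writes.
 */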
750 static void
751 safexcel_execute(struct safexcel_softc *sc, struct safexcel_ring *ring,
752 struct safexcel_request *req, int hint)
753 {
754 int ringidx, ncdesc, nrdesc;
755 bool busy;
756
757 mtx_assert(&ring->mtx, MA_OWNED);
758
759 if ((hint & CRYPTO_HINT_MORE) != 0) {
760 ring->pending++;
761 ring->pending_cdesc += req->cdescs;
762 ring->pending_rdesc += req->rdescs;
763 return;
764 }
765
766 ringidx = req->ringidx;
767
768 busy = ring->queued != 0;
769 ncdesc = ring->pending_cdesc + req->cdescs;
770 nrdesc = ring->pending_rdesc + req->rdescs;
771 ring->queued += ring->pending + 1;
772
773 if (!busy) {
774 SAFEXCEL_WRITE(sc,
775 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_THRESH,
776 SAFEXCEL_HIA_CDR_THRESH_PKT_MODE | ring->queued);
777 }
778 SAFEXCEL_WRITE(sc,
779 SAFEXCEL_HIA_RDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
780 nrdesc * sc->sc_config.rd_offset * sizeof(uint32_t));
781 SAFEXCEL_WRITE(sc,
782 SAFEXCEL_HIA_CDR(sc, ringidx) + SAFEXCEL_HIA_xDR_PREP_COUNT,
783 ncdesc * sc->sc_config.cd_offset * sizeof(uint32_t));
784
785 ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
786 }
787
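/*
 * Initialize the software state of each ring and point every command
 * descriptor at its slot in the ring's additional token (atoken) buffer.
 */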
788 static void
789 safexcel_init_rings(struct safexcel_softc *sc)
790 {
791 struct safexcel_cmd_descr *cdesc;
792 struct safexcel_ring *ring;
793 uint64_t atok;
794 int i, j;
795
796 for (i = 0; i < sc->sc_config.rings; i++) {
797 ring = &sc->sc_ring[i];
798
799 snprintf(ring->lockname, sizeof(ring->lockname),
800 "safexcel_ring%d", i);
801 mtx_init(&ring->mtx, ring->lockname, NULL, MTX_DEF);
802
803 ring->pending = ring->pending_cdesc = ring->pending_rdesc = 0;
804 ring->queued = 0;
805 ring->cdr.read = ring->cdr.write = 0;
806 ring->rdr.read = ring->rdr.write = 0;
807 for (j = 0; j < SAFEXCEL_RING_SIZE; j++) {
808 cdesc = &ring->cdr.desc[j];
809 atok = ring->dma_atok.paddr +
810 sc->sc_config.atok_offset * j;
811 cdesc->atok_lo = SAFEXCEL_ADDR_LO(atok);
812 cdesc->atok_hi = SAFEXCEL_ADDR_HI(atok);
813 }
814 }
815 }
816
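/*
 * Callback for bus_dmamap_load(): record the bus address of the single
 * segment backing a safexcel_dma_mem allocation.
 */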
817 static void
818 safexcel_dma_alloc_mem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
819 int error)
820 {
821 struct safexcel_dma_mem *sdm;
822
823 if (error != 0)
824 return;
825
826 KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
827 sdm = arg;
828 sdm->paddr = segs->ds_addr;
829 }
830
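/*
 * Allocate a page-aligned, physically contiguous DMA buffer of the given
 * size and load it, storing its bus address in sdm->paddr.
 */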
831 static int
832 safexcel_dma_alloc_mem(struct safexcel_softc *sc, struct safexcel_dma_mem *sdm,
833 bus_size_t size)
834 {
835 int error;
836
837 KASSERT(sdm->vaddr == NULL,
838 ("%s: DMA memory descriptor in use.", __func__));
839
840 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev), /* parent */
841 PAGE_SIZE, 0, /* alignment, boundary */
842 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
843 BUS_SPACE_MAXADDR, /* highaddr */
844 NULL, NULL, /* filtfunc, filtfuncarg */
845 size, 1, /* maxsize, nsegments */
846 size, BUS_DMA_COHERENT, /* maxsegsz, flags */
847 NULL, NULL, /* lockfunc, lockfuncarg */
848 &sdm->tag); /* dmat */
849 if (error != 0) {
850 device_printf(sc->sc_dev,
851 "failed to allocate busdma tag, error %d\n", error);
852 goto err1;
853 }
854
855 error = bus_dmamem_alloc(sdm->tag, (void **)&sdm->vaddr,
856 BUS_DMA_WAITOK | BUS_DMA_ZERO | BUS_DMA_COHERENT, &sdm->map);
857 if (error != 0) {
858 device_printf(sc->sc_dev,
859 "failed to allocate DMA safe memory, error %d\n", error);
860 goto err2;
861 }
862
863 error = bus_dmamap_load(sdm->tag, sdm->map, sdm->vaddr, size,
864 safexcel_dma_alloc_mem_cb, sdm, BUS_DMA_NOWAIT);
865 if (error != 0) {
866 device_printf(sc->sc_dev,
867 "cannot get address of the DMA memory, error %d\n", error);
868 goto err3;
869 }
870
871 return (0);
872 err3:
873 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
874 err2:
875 bus_dma_tag_destroy(sdm->tag);
876 err1:
877 sdm->vaddr = NULL;
878
879 return (error);
880 }
881
882 static void
883 safexcel_dma_free_mem(struct safexcel_dma_mem *sdm)
884 {
885 bus_dmamap_unload(sdm->tag, sdm->map);
886 bus_dmamem_free(sdm->tag, sdm->vaddr, sdm->map);
887 bus_dma_tag_destroy(sdm->tag);
888 }
889
890 static void
891 safexcel_dma_free_rings(struct safexcel_softc *sc)
892 {
893 struct safexcel_ring *ring;
894 int i;
895
896 for (i = 0; i < sc->sc_config.rings; i++) {
897 ring = &sc->sc_ring[i];
898 safexcel_dma_free_mem(&ring->cdr.dma);
899 safexcel_dma_free_mem(&ring->dma_atok);
900 safexcel_dma_free_mem(&ring->rdr.dma);
901 bus_dma_tag_destroy(ring->data_dtag);
902 mtx_destroy(&ring->mtx);
903 }
904 }
905
906 static int
907 safexcel_dma_init(struct safexcel_softc *sc)
908 {
909 struct safexcel_ring *ring;
910 bus_size_t size;
911 int error, i;
912
913 for (i = 0; i < sc->sc_config.rings; i++) {
914 ring = &sc->sc_ring[i];
915
916 error = bus_dma_tag_create(
917 bus_get_dma_tag(sc->sc_dev),/* parent */
918 1, 0, /* alignment, boundary */
919 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
920 BUS_SPACE_MAXADDR, /* highaddr */
921 NULL, NULL, /* filtfunc, filtfuncarg */
922 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsize */
923 SAFEXCEL_MAX_FRAGMENTS, /* nsegments */
924 SAFEXCEL_MAX_REQUEST_SIZE, /* maxsegsz */
925 BUS_DMA_COHERENT, /* flags */
926 NULL, NULL, /* lockfunc, lockfuncarg */
927 &ring->data_dtag); /* dmat */
928 if (error != 0) {
929 device_printf(sc->sc_dev,
930 "bus_dma_tag_create main failed; error %d\n", error);
931 return (error);
932 }
933
934 size = sizeof(uint32_t) * sc->sc_config.cd_offset *
935 SAFEXCEL_RING_SIZE;
936 error = safexcel_dma_alloc_mem(sc, &ring->cdr.dma, size);
937 if (error != 0) {
938 device_printf(sc->sc_dev,
939 "failed to allocate CDR DMA memory, error %d\n",
940 error);
941 goto err;
942 }
943 ring->cdr.desc =
944 (struct safexcel_cmd_descr *)ring->cdr.dma.vaddr;
945
946 /* Allocate additional CDR token memory. */
947 size = (bus_size_t)sc->sc_config.atok_offset *
948 SAFEXCEL_RING_SIZE;
949 error = safexcel_dma_alloc_mem(sc, &ring->dma_atok, size);
950 if (error != 0) {
951 device_printf(sc->sc_dev,
952 "failed to allocate atoken DMA memory, error %d\n",
953 error);
954 goto err;
955 }
956
957 size = sizeof(uint32_t) * sc->sc_config.rd_offset *
958 SAFEXCEL_RING_SIZE;
959 error = safexcel_dma_alloc_mem(sc, &ring->rdr.dma, size);
960 if (error) {
961 device_printf(sc->sc_dev,
962 "failed to allocate RDR DMA memory, error %d\n",
963 error);
964 goto err;
965 }
966 ring->rdr.desc =
967 (struct safexcel_res_descr *)ring->rdr.dma.vaddr;
968 }
969
970 return (0);
971 err:
972 safexcel_dma_free_rings(sc);
973 return (error);
974 }
975
976 static void
977 safexcel_deinit_hw(struct safexcel_softc *sc)
978 {
979 safexcel_hw_reset_rings(sc);
980 safexcel_dma_free_rings(sc);
981 }
982
983 static int
984 safexcel_init_hw(struct safexcel_softc *sc)
985 {
986 int pe;
987
988 /* 23.3.7 Initialization */
989 if (safexcel_configure(sc) != 0)
990 return (EINVAL);
991
992 if (safexcel_dma_init(sc) != 0)
993 return (ENOMEM);
994
995 safexcel_init_rings(sc);
996
997 safexcel_init_hia_bus_access(sc);
998
999 /* 23.3.7.2 Disable EIP-97 global Interrupts */
1000 safexcel_disable_global_interrupts(sc);
1001
1002 for (pe = 0; pe < sc->sc_config.pes; pe++) {
1003 /* 23.3.7.3 Configure Data Fetch Engine */
1004 safexcel_configure_dfe_engine(sc, pe);
1005
1006 /* 23.3.7.4 Configure Data Store Engine */
1007 if (safexcel_configure_dse(sc, pe)) {
1008 safexcel_deinit_hw(sc);
1009 return (-1);
1010 }
1011
1012 /* 23.3.7.5 1. Protocol enables */
1013 SAFEXCEL_WRITE(sc,
1014 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION_EN(pe),
1015 0xffffffff);
1016 SAFEXCEL_WRITE(sc,
1017 SAFEXCEL_PE(sc) + SAFEXCEL_PE_EIP96_FUNCTION2_EN(pe),
1018 0xffffffff);
1019 }
1020
1021 safexcel_hw_prepare_rings(sc);
1022
1023 /* 23.3.7.5 Configure the Processing Engine(s). */
1024 for (pe = 0; pe < sc->sc_config.pes; pe++)
1025 safexcel_enable_pe_engine(sc, pe);
1026
1027 safexcel_hw_setup_rings(sc);
1028
1029 return (0);
1030 }
1031
1032 static int
1033 safexcel_setup_dev_interrupts(struct safexcel_softc *sc)
1034 {
1035 int error, i, j;
1036
1037 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++) {
1038 sc->sc_ih[i].sc = sc;
1039 sc->sc_ih[i].ring = i;
1040
1041 if (bus_setup_intr(sc->sc_dev, sc->sc_intr[i],
1042 INTR_TYPE_NET | INTR_MPSAFE, NULL, safexcel_ring_intr,
1043 &sc->sc_ih[i], &sc->sc_ih[i].handle)) {
1044 device_printf(sc->sc_dev,
1045 "couldn't setup interrupt %d\n", i);
1046 goto err;
1047 }
1048
1049 error = bus_bind_intr(sc->sc_dev, sc->sc_intr[i], i % mp_ncpus);
1050 if (error != 0)
			device_printf(sc->sc_dev,
			    "failed to bind ring %d interrupt, error %d\n",
			    i, error);
1053 }
1054
1055 return (0);
1056
1057 err:
1058 for (j = 0; j < i; j++)
1059 bus_teardown_intr(sc->sc_dev, sc->sc_intr[j],
1060 sc->sc_ih[j].handle);
1061
1062 return (ENXIO);
1063 }
1064
1065 static void
1066 safexcel_teardown_dev_interrupts(struct safexcel_softc *sc)
1067 {
1068 int i;
1069
	for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
		if (sc->sc_intr[i] == NULL)
			continue;
		bus_teardown_intr(sc->sc_dev, sc->sc_intr[i],
		    sc->sc_ih[i].handle);
	}
1073 }
1074
1075 static int
1076 safexcel_alloc_dev_resources(struct safexcel_softc *sc)
1077 {
1078 char name[16];
1079 device_t dev;
1080 phandle_t node;
1081 int error, i, rid;
1082
1083 dev = sc->sc_dev;
1084 node = ofw_bus_get_node(dev);
1085
1086 rid = 0;
1087 sc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
1088 RF_ACTIVE);
1089 if (sc->sc_res == NULL) {
1090 device_printf(dev, "couldn't allocate memory resources\n");
1091 return (ENXIO);
1092 }
1093
1094 for (i = 0; i < SAFEXCEL_MAX_RINGS; i++) {
1095 (void)snprintf(name, sizeof(name), "ring%d", i);
1096 error = ofw_bus_find_string_index(node, "interrupt-names", name,
1097 &rid);
1098 if (error != 0)
1099 break;
1100
1101 sc->sc_intr[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
1102 RF_ACTIVE | RF_SHAREABLE);
1103 if (sc->sc_intr[i] == NULL) {
1104 error = ENXIO;
1105 goto out;
1106 }
1107 }
1108 if (i == 0) {
1109 device_printf(dev, "couldn't allocate interrupt resources\n");
1110 error = ENXIO;
1111 goto out;
1112 }
1113
1114 return (0);
1115
1116 out:
1117 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1118 bus_release_resource(dev, SYS_RES_IRQ,
1119 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1120 bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(sc->sc_res),
1121 sc->sc_res);
1122 return (error);
1123 }
1124
1125 static void
1126 safexcel_free_dev_resources(struct safexcel_softc *sc)
1127 {
1128 int i;
1129
1130 for (i = 0; i < SAFEXCEL_MAX_RINGS && sc->sc_intr[i] != NULL; i++)
1131 bus_release_resource(sc->sc_dev, SYS_RES_IRQ,
1132 rman_get_rid(sc->sc_intr[i]), sc->sc_intr[i]);
1133 if (sc->sc_res != NULL)
1134 bus_release_resource(sc->sc_dev, SYS_RES_MEMORY,
1135 rman_get_rid(sc->sc_res), sc->sc_res);
1136 }
1137
1138 static int
1139 safexcel_probe(device_t dev)
1140 {
1141 struct safexcel_softc *sc;
1142
1143 if (!ofw_bus_status_okay(dev))
1144 return (ENXIO);
1145
1146 sc = device_get_softc(dev);
1147 sc->sc_type = ofw_bus_search_compatible(dev, safexcel_compat)->ocd_data;
1148 if (sc->sc_type == 0)
1149 return (ENXIO);
1150
1151 device_set_desc(dev, "SafeXcel EIP-97 crypto accelerator");
1152
1153 return (BUS_PROBE_DEFAULT);
1154 }
1155
1156 static int
1157 safexcel_attach(device_t dev)
1158 {
1159 struct sysctl_ctx_list *ctx;
1160 struct sysctl_oid *oid;
1161 struct sysctl_oid_list *children;
1162 struct safexcel_softc *sc;
1163 struct safexcel_request *req;
1164 struct safexcel_ring *ring;
1165 int i, j, ringidx;
1166
1167 sc = device_get_softc(dev);
1168 sc->sc_dev = dev;
1169 sc->sc_cid = -1;
1170
1171 if (safexcel_alloc_dev_resources(sc))
1172 goto err;
1173
1174 if (safexcel_setup_dev_interrupts(sc))
1175 goto err1;
1176
1177 if (safexcel_init_hw(sc))
1178 goto err2;
1179
1180 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1181 ring = &sc->sc_ring[ringidx];
1182
1183 ring->cmd_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
1184 ring->res_data = sglist_alloc(SAFEXCEL_MAX_FRAGMENTS, M_WAITOK);
1185
1186 for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
1187 req = &ring->requests[i];
1188 req->sc = sc;
1189 req->ringidx = ringidx;
1190 if (bus_dmamap_create(ring->data_dtag,
1191 BUS_DMA_COHERENT, &req->dmap) != 0) {
1192 for (j = 0; j < i; j++)
1193 bus_dmamap_destroy(ring->data_dtag,
1194 ring->requests[j].dmap);
1195 goto err2;
1196 }
			if (safexcel_dma_alloc_mem(sc, &req->ctx,
			    sizeof(struct safexcel_context_record)) != 0) {
				/* Also release this iteration's DMA map. */
				bus_dmamap_destroy(ring->data_dtag,
				    req->dmap);
				for (j = 0; j < i; j++) {
					bus_dmamap_destroy(ring->data_dtag,
					    ring->requests[j].dmap);
					safexcel_dma_free_mem(
					    &ring->requests[j].ctx);
				}
				goto err2;
			}
1207 }
1208 }
1209
1210 ctx = device_get_sysctl_ctx(dev);
1211 SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
1212 OID_AUTO, "debug", CTLFLAG_RWTUN, &sc->sc_debug, 0,
1213 "Debug message verbosity");
1214
1215 oid = device_get_sysctl_tree(sc->sc_dev);
1216 children = SYSCTL_CHILDREN(oid);
1217 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
1218 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
1219 children = SYSCTL_CHILDREN(oid);
1220
1221 sc->sc_req_alloc_failures = counter_u64_alloc(M_WAITOK);
1222 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "req_alloc_failures",
1223 CTLFLAG_RD, &sc->sc_req_alloc_failures,
1224 "Number of request allocation failures");
1225 sc->sc_cdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
1226 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cdesc_alloc_failures",
1227 CTLFLAG_RD, &sc->sc_cdesc_alloc_failures,
1228 "Number of command descriptor ring overflows");
1229 sc->sc_rdesc_alloc_failures = counter_u64_alloc(M_WAITOK);
1230 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "rdesc_alloc_failures",
1231 CTLFLAG_RD, &sc->sc_rdesc_alloc_failures,
1232 "Number of result descriptor ring overflows");
1233
1234 sc->sc_cid = crypto_get_driverid(dev, sizeof(struct safexcel_session),
1235 CRYPTOCAP_F_HARDWARE);
1236 if (sc->sc_cid < 0)
1237 goto err2;
1238
1239 return (0);
1240
1241 err2:
1242 safexcel_teardown_dev_interrupts(sc);
1243 err1:
1244 safexcel_free_dev_resources(sc);
1245 err:
1246 return (ENXIO);
1247 }
1248
1249 static int
1250 safexcel_detach(device_t dev)
1251 {
1252 struct safexcel_ring *ring;
1253 struct safexcel_softc *sc;
1254 int i, ringidx;
1255
1256 sc = device_get_softc(dev);
1257
1258 if (sc->sc_cid >= 0)
1259 crypto_unregister_all(sc->sc_cid);
1260
1261 counter_u64_free(sc->sc_req_alloc_failures);
1262 counter_u64_free(sc->sc_cdesc_alloc_failures);
1263 counter_u64_free(sc->sc_rdesc_alloc_failures);
1264
1265 for (ringidx = 0; ringidx < sc->sc_config.rings; ringidx++) {
1266 ring = &sc->sc_ring[ringidx];
1267 for (i = 0; i < SAFEXCEL_RING_SIZE; i++) {
1268 bus_dmamap_destroy(ring->data_dtag,
1269 ring->requests[i].dmap);
1270 safexcel_dma_free_mem(&ring->requests[i].ctx);
1271 }
1272 sglist_free(ring->cmd_data);
1273 sglist_free(ring->res_data);
1274 }
1275 safexcel_deinit_hw(sc);
1276 safexcel_teardown_dev_interrupts(sc);
1277 safexcel_free_dev_resources(sc);
1278
1279 return (0);
1280 }
1281
1282 /*
1283 * Pre-compute the hash key used in GHASH, which is a block of zeroes encrypted
1284 * using the cipher key.
1285 */
1286 static void
1287 safexcel_setkey_ghash(const uint8_t *key, int klen, uint32_t *hashkey)
1288 {
1289 uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
1290 uint8_t zeros[AES_BLOCK_LEN];
1291 int i, rounds;
1292
1293 memset(zeros, 0, sizeof(zeros));
1294
1295 rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
1296 rijndaelEncrypt(ks, rounds, zeros, (uint8_t *)hashkey);
1297 for (i = 0; i < GMAC_BLOCK_LEN / sizeof(uint32_t); i++)
1298 hashkey[i] = htobe32(hashkey[i]);
1299
1300 explicit_bzero(ks, sizeof(ks));
1301 }
1302
1303 /*
1304 * Pre-compute the combined CBC-MAC key, which consists of three keys K1, K2, K3
1305 * in the hardware implementation. K1 is the cipher key and comes last in the
 * buffer since K2 and K3 have a fixed size of AES_BLOCK_LEN.  For now
 * XCBC-MAC is not implemented, so K2 and K3 are simply zeroed.
1308 */
1309 static void
1310 safexcel_setkey_xcbcmac(const uint8_t *key, int klen, uint32_t *hashkey)
1311 {
1312 int i, off;
1313
1314 memset(hashkey, 0, 2 * AES_BLOCK_LEN);
1315 off = 2 * AES_BLOCK_LEN / sizeof(uint32_t);
1316 for (i = 0; i < klen / sizeof(uint32_t); i++, key += 4)
1317 hashkey[i + off] = htobe32(le32dec(key));
1318 }
1319
1320 static void
1321 safexcel_setkey_hmac_digest(const struct auth_hash *ahash, union authctx *ctx,
1322 char *buf)
1323 {
1324 int hashwords, i;
1325
1326 switch (ahash->type) {
1327 case CRYPTO_SHA1_HMAC:
1328 hashwords = ahash->hashsize / sizeof(uint32_t);
1329 for (i = 0; i < hashwords; i++)
1330 ((uint32_t *)buf)[i] = htobe32(ctx->sha1ctx.h.b32[i]);
1331 break;
1332 case CRYPTO_SHA2_224_HMAC:
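		/*
		 * SHA-224 is computed using the SHA-256 state, so the full
		 * 256-bit intermediate state must be saved.
		 */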
1333 hashwords = auth_hash_hmac_sha2_256.hashsize / sizeof(uint32_t);
1334 for (i = 0; i < hashwords; i++)
1335 ((uint32_t *)buf)[i] = htobe32(ctx->sha224ctx.state[i]);
1336 break;
1337 case CRYPTO_SHA2_256_HMAC:
1338 hashwords = ahash->hashsize / sizeof(uint32_t);
1339 for (i = 0; i < hashwords; i++)
1340 ((uint32_t *)buf)[i] = htobe32(ctx->sha256ctx.state[i]);
1341 break;
1342 case CRYPTO_SHA2_384_HMAC:
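		/* Likewise, SHA-384 uses the full 512-bit SHA-512 state. */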
1343 hashwords = auth_hash_hmac_sha2_512.hashsize / sizeof(uint64_t);
1344 for (i = 0; i < hashwords; i++)
1345 ((uint64_t *)buf)[i] = htobe64(ctx->sha384ctx.state[i]);
1346 break;
1347 case CRYPTO_SHA2_512_HMAC:
1348 hashwords = ahash->hashsize / sizeof(uint64_t);
1349 for (i = 0; i < hashwords; i++)
1350 ((uint64_t *)buf)[i] = htobe64(ctx->sha512ctx.state[i]);
1351 break;
1352 }
1353 }
1354
1355 /*
1356 * Pre-compute the inner and outer digests used in the HMAC algorithm.
1357 */
1358 static void
1359 safexcel_setkey_hmac(const struct crypto_session_params *csp,
1360 const uint8_t *key, int klen, uint8_t *ipad, uint8_t *opad)
1361 {
1362 union authctx ctx;
1363 const struct auth_hash *ahash;
1364
1365 ahash = crypto_auth_hash(csp);
1366 hmac_init_ipad(ahash, key, klen, &ctx);
1367 safexcel_setkey_hmac_digest(ahash, &ctx, ipad);
1368 hmac_init_opad(ahash, key, klen, &ctx);
1369 safexcel_setkey_hmac_digest(ahash, &ctx, opad);
1370 explicit_bzero(&ctx, ahash->ctxsize);
1371 }
1372
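/*
 * Extract the XTS tweak key.  The key supplied by the consumer is the
 * concatenation of the data key and the tweak key, and the caller passes
 * half of the full key length in klen.
 */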
1373 static void
1374 safexcel_setkey_xts(const uint8_t *key, int klen, uint8_t *tweakkey)
1375 {
1376 memcpy(tweakkey, key + klen, klen);
1377 }
1378
1379 /*
1380 * Populate a context record with parameters from a session. Some consumers
1381 * specify per-request keys, in which case the context must be re-initialized
1382 * for each request.
1383 */
1384 static int
1385 safexcel_set_context(struct safexcel_context_record *ctx, int op,
1386 const uint8_t *ckey, const uint8_t *akey, struct safexcel_session *sess)
1387 {
1388 const struct crypto_session_params *csp;
1389 uint8_t *data;
1390 uint32_t ctrl0, ctrl1;
1391 int aklen, alg, cklen, off;
1392
1393 csp = crypto_get_params(sess->cses);
1394 aklen = csp->csp_auth_klen;
1395 cklen = csp->csp_cipher_klen;
1396 if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
1397 cklen /= 2;
1398
1399 ctrl0 = sess->alg | sess->digest | sess->hash;
1400 ctrl1 = sess->mode;
1401
1402 data = (uint8_t *)ctx->data;
1403 if (csp->csp_cipher_alg != 0) {
1404 memcpy(data, ckey, cklen);
1405 off = cklen;
1406 } else if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC) {
1407 memcpy(data, akey, aklen);
1408 off = aklen;
1409 } else {
1410 off = 0;
1411 }
1412
1413 switch (csp->csp_cipher_alg) {
1414 case CRYPTO_AES_NIST_GCM_16:
1415 safexcel_setkey_ghash(ckey, cklen, (uint32_t *)(data + off));
1416 off += GMAC_BLOCK_LEN;
1417 break;
1418 case CRYPTO_AES_CCM_16:
1419 safexcel_setkey_xcbcmac(ckey, cklen, (uint32_t *)(data + off));
1420 off += AES_BLOCK_LEN * 2 + cklen;
1421 break;
1422 case CRYPTO_AES_XTS:
1423 safexcel_setkey_xts(ckey, cklen, data + off);
1424 off += cklen;
1425 break;
1426 }
1427 switch (csp->csp_auth_alg) {
1428 case CRYPTO_AES_NIST_GMAC:
1429 safexcel_setkey_ghash(akey, aklen, (uint32_t *)(data + off));
1430 off += GMAC_BLOCK_LEN;
1431 break;
1432 case CRYPTO_SHA1_HMAC:
1433 case CRYPTO_SHA2_224_HMAC:
1434 case CRYPTO_SHA2_256_HMAC:
1435 case CRYPTO_SHA2_384_HMAC:
1436 case CRYPTO_SHA2_512_HMAC:
1437 safexcel_setkey_hmac(csp, akey, aklen,
1438 data + off, data + off + sess->statelen);
1439 off += sess->statelen * 2;
1440 break;
1441 }
1442 ctrl0 |= SAFEXCEL_CONTROL0_SIZE(off / sizeof(uint32_t));
1443
1444 alg = csp->csp_cipher_alg;
1445 if (alg == 0)
1446 alg = csp->csp_auth_alg;
1447
1448 switch (alg) {
1449 case CRYPTO_AES_CCM_16:
1450 if (CRYPTO_OP_IS_ENCRYPT(op)) {
1451 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_ENCRYPT_OUT |
1452 SAFEXCEL_CONTROL0_KEY_EN;
1453 } else {
1454 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_DECRYPT_HASH_IN |
1455 SAFEXCEL_CONTROL0_KEY_EN;
1456 }
1457 ctrl1 |= SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1458 SAFEXCEL_CONTROL1_IV2 | SAFEXCEL_CONTROL1_IV3;
1459 break;
1460 case CRYPTO_AES_CBC:
1461 case CRYPTO_AES_ICM:
1462 case CRYPTO_AES_XTS:
1463 if (CRYPTO_OP_IS_ENCRYPT(op)) {
1464 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1465 SAFEXCEL_CONTROL0_KEY_EN;
1466 if (csp->csp_auth_alg != 0)
1467 ctrl0 |=
1468 SAFEXCEL_CONTROL0_TYPE_ENCRYPT_HASH_OUT;
1469 } else {
1470 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1471 SAFEXCEL_CONTROL0_KEY_EN;
1472 if (csp->csp_auth_alg != 0)
1473 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
1474 }
1475 break;
1476 case CRYPTO_AES_NIST_GCM_16:
1477 case CRYPTO_AES_NIST_GMAC:
1478 if (CRYPTO_OP_IS_ENCRYPT(op) || csp->csp_auth_alg != 0) {
1479 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_OUT |
1480 SAFEXCEL_CONTROL0_KEY_EN |
1481 SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1482 } else {
1483 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_CRYPTO_IN |
1484 SAFEXCEL_CONTROL0_KEY_EN |
1485 SAFEXCEL_CONTROL0_TYPE_HASH_DECRYPT_IN;
1486 }
1487 if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16) {
1488 ctrl1 |= SAFEXCEL_CONTROL1_COUNTER_MODE |
1489 SAFEXCEL_CONTROL1_IV0 | SAFEXCEL_CONTROL1_IV1 |
1490 SAFEXCEL_CONTROL1_IV2;
1491 }
1492 break;
1493 case CRYPTO_SHA1:
1494 case CRYPTO_SHA2_224:
1495 case CRYPTO_SHA2_256:
1496 case CRYPTO_SHA2_384:
1497 case CRYPTO_SHA2_512:
1498 ctrl0 |= SAFEXCEL_CONTROL0_RESTART_HASH;
1499 /* FALLTHROUGH */
1500 case CRYPTO_SHA1_HMAC:
1501 case CRYPTO_SHA2_224_HMAC:
1502 case CRYPTO_SHA2_256_HMAC:
1503 case CRYPTO_SHA2_384_HMAC:
1504 case CRYPTO_SHA2_512_HMAC:
1505 ctrl0 |= SAFEXCEL_CONTROL0_TYPE_HASH_OUT;
1506 break;
1507 }
1508
1509 ctx->control0 = ctrl0;
1510 ctx->control1 = ctrl1;
1511
1512 return (off);
1513 }
1514
1515 /*
1516 * Construct a no-op instruction, used to pad input tokens.
1517 */
1518 static void
1519 safexcel_instr_nop(struct safexcel_instr **instrp)
1520 {
1521 struct safexcel_instr *instr;
1522
1523 instr = *instrp;
1524 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1525 instr->length = (1 << 2);
1526 instr->status = 0;
1527 instr->instructions = 0;
1528
1529 *instrp = instr + 1;
1530 }
1531
1532 /*
1533 * Insert the digest of the input payload. This is typically the last
1534 * instruction of a sequence.
1535 */
1536 static void
1537 safexcel_instr_insert_digest(struct safexcel_instr **instrp, int len)
1538 {
1539 struct safexcel_instr *instr;
1540
1541 instr = *instrp;
1542 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1543 instr->length = len;
1544 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1545 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1546 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1547 SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
1548
1549 *instrp = instr + 1;
1550 }
1551
1552 /*
1553 * Retrieve and verify a digest.
1554 */
1555 static void
1556 safexcel_instr_retrieve_digest(struct safexcel_instr **instrp, int len)
1557 {
1558 struct safexcel_instr *instr;
1559
1560 instr = *instrp;
1561 instr->opcode = SAFEXCEL_INSTR_OPCODE_RETRIEVE;
1562 instr->length = len;
1563 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1564 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1565 instr->instructions = SAFEXCEL_INSTR_INSERT_HASH_DIGEST;
1566 instr++;
1567
1568 instr->opcode = SAFEXCEL_INSTR_OPCODE_VERIFY_FIELDS;
1569 instr->length = len | SAFEXCEL_INSTR_VERIFY_HASH;
1570 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH |
1571 SAFEXCEL_INSTR_STATUS_LAST_PACKET;
1572 instr->instructions = SAFEXCEL_INSTR_VERIFY_PADDING;
1573
1574 *instrp = instr + 1;
1575 }
1576
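/*
 * Emit the instruction pair used by the GCM and CCM handlers to carry a
 * temporary AES block between the AAD and payload segments of the data
 * stream.
 */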
1577 static void
1578 safexcel_instr_temp_aes_block(struct safexcel_instr **instrp)
1579 {
1580 struct safexcel_instr *instr;
1581
1582 instr = *instrp;
1583 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT_REMOVE_RESULT;
1584 instr->length = 0;
1585 instr->status = 0;
1586 instr->instructions = AES_BLOCK_LEN;
1587 instr++;
1588
1589 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1590 instr->length = AES_BLOCK_LEN;
1591 instr->status = 0;
1592 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1593 SAFEXCEL_INSTR_DEST_CRYPTO;
1594
1595 *instrp = instr + 1;
1596 }
1597
1598 /*
1599 * Handle a request for an unauthenticated block cipher.
1600 */
1601 static void
1602 safexcel_instr_cipher(struct safexcel_request *req,
1603 struct safexcel_instr *instr, struct safexcel_cmd_descr *cdesc)
1604 {
1605 struct cryptop *crp;
1606
1607 crp = req->crp;
1608
1609 /* Insert the payload. */
1610 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1611 instr->length = crp->crp_payload_length;
1612 instr->status = SAFEXCEL_INSTR_STATUS_LAST_PACKET |
1613 SAFEXCEL_INSTR_STATUS_LAST_HASH;
1614 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1615 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_OUTPUT;
1616
1617 cdesc->additional_cdata_size = 1;
1618 }
1619
1620 static void
1621 safexcel_instr_eta(struct safexcel_request *req, struct safexcel_instr *instr,
1622 struct safexcel_cmd_descr *cdesc)
1623 {
1624 struct cryptop *crp;
1625 struct safexcel_instr *start;
1626
1627 crp = req->crp;
1628 start = instr;
1629
1630 /* Insert the AAD. */
1631 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1632 instr->length = crp->crp_aad_length;
1633 instr->status = crp->crp_payload_length == 0 ?
1634 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1635 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1636 SAFEXCEL_INSTR_DEST_HASH;
1637 instr++;
1638
1639 /* Encrypt any data left in the request. */
1640 if (crp->crp_payload_length > 0) {
1641 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1642 instr->length = crp->crp_payload_length;
1643 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1644 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1645 SAFEXCEL_INSTR_DEST_CRYPTO |
1646 SAFEXCEL_INSTR_DEST_HASH |
1647 SAFEXCEL_INSTR_DEST_OUTPUT;
1648 instr++;
1649 }
1650
1651 /*
1652 * Compute the digest, or extract it and place it in the output stream.
1653 */
1654 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1655 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1656 else
1657 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1658 cdesc->additional_cdata_size = instr - start;
1659 }
1660
1661 static void
1662 safexcel_instr_sha_hash(struct safexcel_request *req,
1663 struct safexcel_instr *instr)
1664 {
1665 struct cryptop *crp;
1666 struct safexcel_instr *start;
1667
1668 crp = req->crp;
1669 start = instr;
1670
1671 /* Pass the input data to the hash engine. */
1672 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1673 instr->length = crp->crp_payload_length;
1674 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1675 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1676 instr++;
1677
1678 /* Insert the hash result into the output stream. */
1679 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1680
1681 /* Pad the rest of the inline instruction space. */
1682 while (instr != start + SAFEXCEL_MAX_ITOKENS)
1683 safexcel_instr_nop(&instr);
1684 }
1685
1686 static void
1687 safexcel_instr_ccm(struct safexcel_request *req, struct safexcel_instr *instr,
1688 struct safexcel_cmd_descr *cdesc)
1689 {
1690 const struct crypto_session_params *csp;
1691 struct cryptop *crp;
1692 struct safexcel_instr *start;
1693 uint8_t *a0, *b0, *alenp, L;
1694 int aalign, blen;
1695
1696 crp = req->crp;
1697 csp = crypto_get_params(crp->crp_session);
1698 start = instr;
1699
1700 /*
1701 * Construct two blocks, A0 and B0, used in encryption and
1702 * authentication, respectively. A0 is embedded in the token
1703 * descriptor, and B0 is inserted directly into the data stream using
1704 * instructions below.
1705 *
	 * An explicit check for overflow of the length field is not
	 * needed since the maximum request size of 65535 bytes fits in
	 * the smallest length field, which is the one used with a
	 * 13-byte nonce.
1709 */
1710 blen = AES_BLOCK_LEN;
1711 L = 15 - csp->csp_ivlen;
1712
1713 a0 = (uint8_t *)&cdesc->control_data.token[0];
1714 memset(a0, 0, blen);
1715 a0[0] = L - 1;
1716 memcpy(&a0[1], req->iv, csp->csp_ivlen);
1717
1718 /*
1719 * Insert B0 and the AAD length into the input stream.
1720 */
1721 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1722 instr->length = blen + (crp->crp_aad_length > 0 ? 2 : 0);
1723 instr->status = 0;
1724 instr->instructions = SAFEXCEL_INSTR_DEST_HASH |
1725 SAFEXCEL_INSTR_INSERT_IMMEDIATE;
1726 instr++;
1727
1728 b0 = (uint8_t *)instr;
1729 memset(b0, 0, blen);
1730 b0[0] =
1731 (L - 1) | /* payload length size */
1732 ((req->sess->digestlen - 2) / 2) << 3 /* digest length */ |
1733 (crp->crp_aad_length > 0 ? 1 : 0) << 6 /* AAD present bit */;
1734 memcpy(&b0[1], req->iv, csp->csp_ivlen);
1735 b0[14] = crp->crp_payload_length >> 8;
1736 b0[15] = crp->crp_payload_length & 0xff;
1737 instr += blen / sizeof(*instr);
1738
1739 /* Insert the AAD length and data into the input stream. */
1740 if (crp->crp_aad_length > 0) {
1741 alenp = (uint8_t *)instr;
1742 alenp[0] = crp->crp_aad_length >> 8;
1743 alenp[1] = crp->crp_aad_length & 0xff;
1744 alenp[2] = 0;
1745 alenp[3] = 0;
1746 instr++;
1747
1748 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1749 instr->length = crp->crp_aad_length;
1750 instr->status = 0;
1751 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1752 instr++;
1753
1754 /* Insert zero padding. */
		aalign = (crp->crp_aad_length + 2) & (blen - 1);
		instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
		instr->length = aalign == 0 ? 0 : blen - aalign;
1759 instr->status = crp->crp_payload_length == 0 ?
1760 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1761 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1762 instr++;
1763 }
1764
1765 safexcel_instr_temp_aes_block(&instr);
1766
1767 /* Insert the cipher payload into the input stream. */
1768 if (crp->crp_payload_length > 0) {
1769 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1770 instr->length = crp->crp_payload_length;
1771 instr->status = (crp->crp_payload_length & (blen - 1)) == 0 ?
1772 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1773 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1774 SAFEXCEL_INSTR_DEST_CRYPTO |
1775 SAFEXCEL_INSTR_DEST_HASH |
1776 SAFEXCEL_INSTR_INS_LAST;
1777 instr++;
1778
1779 /* Insert zero padding. */
1780 if (crp->crp_payload_length & (blen - 1)) {
1781 instr->opcode = SAFEXCEL_INSTR_OPCODE_INSERT;
1782 instr->length = blen -
1783 (crp->crp_payload_length & (blen - 1));
1784 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1785 instr->instructions = SAFEXCEL_INSTR_DEST_HASH;
1786 instr++;
1787 }
1788 }
1789
1790 /*
1791 * Compute the digest, or extract it and place it in the output stream.
1792 */
1793 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1794 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1795 else
1796 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1797
1798 cdesc->additional_cdata_size = instr - start;
1799 }
1800
1801 static void
1802 safexcel_instr_gcm(struct safexcel_request *req, struct safexcel_instr *instr,
1803 struct safexcel_cmd_descr *cdesc)
1804 {
1805 struct cryptop *crp;
1806 struct safexcel_instr *start;
1807
1808 memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
1809 cdesc->control_data.token[3] = htobe32(1);
1810
1811 crp = req->crp;
1812 start = instr;
1813
1814 /* Insert the AAD into the input stream. */
1815 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1816 instr->length = crp->crp_aad_length;
1817 instr->status = crp->crp_payload_length == 0 ?
1818 SAFEXCEL_INSTR_STATUS_LAST_HASH : 0;
1819 instr->instructions = SAFEXCEL_INSTR_INS_LAST |
1820 SAFEXCEL_INSTR_DEST_HASH;
1821 instr++;
1822
1823 safexcel_instr_temp_aes_block(&instr);
1824
1825 /* Insert the cipher payload into the input stream. */
1826 if (crp->crp_payload_length > 0) {
1827 instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
1828 instr->length = crp->crp_payload_length;
1829 instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
1830 instr->instructions = SAFEXCEL_INSTR_DEST_OUTPUT |
1831 SAFEXCEL_INSTR_DEST_CRYPTO | SAFEXCEL_INSTR_DEST_HASH |
1832 SAFEXCEL_INSTR_INS_LAST;
1833 instr++;
1834 }
1835
1836 /*
1837 * Compute the digest, or extract it and place it in the output stream.
1838 */
1839 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1840 safexcel_instr_insert_digest(&instr, req->sess->digestlen);
1841 else
1842 safexcel_instr_retrieve_digest(&instr, req->sess->digestlen);
1843
1844 cdesc->additional_cdata_size = instr - start;
1845 }
1846
static void
safexcel_instr_gmac(struct safexcel_request *req, struct safexcel_instr *instr,
    struct safexcel_cmd_descr *cdesc)
{
	struct cryptop *crp;
	struct safexcel_instr *start;

	memcpy(cdesc->control_data.token, req->iv, AES_GCM_IV_LEN);
	cdesc->control_data.token[3] = htobe32(1);

	crp = req->crp;
	start = instr;

	instr->opcode = SAFEXCEL_INSTR_OPCODE_DIRECTION;
	instr->length = crp->crp_payload_length;
	instr->status = SAFEXCEL_INSTR_STATUS_LAST_HASH;
	instr->instructions = SAFEXCEL_INSTR_INS_LAST |
	    SAFEXCEL_INSTR_DEST_HASH;
	instr++;

	safexcel_instr_temp_aes_block(&instr);

	safexcel_instr_insert_digest(&instr, req->sess->digestlen);

	cdesc->additional_cdata_size = instr - start;
}

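/*
 * Populate the command descriptor's control data and token for a request:
 * select or rebuild the context record, then emit the token instruction
 * stream matching the session's cipher and authentication algorithms.
 */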
static void
safexcel_set_token(struct safexcel_request *req)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_context_record *ctx;
	struct safexcel_context_template *ctxtmp;
	struct safexcel_instr *instr;
	struct safexcel_softc *sc;
	const uint8_t *akey, *ckey;
	int ringidx;

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	cdesc = req->cdesc;
	sc = req->sc;
	ringidx = req->ringidx;

	akey = crp->crp_auth_key;
	ckey = crp->crp_cipher_key;
	if (akey != NULL || ckey != NULL) {
		/*
		 * If we have a per-request key we have to generate the context
		 * record on the fly.
		 */
		if (akey == NULL)
			akey = csp->csp_auth_key;
		if (ckey == NULL)
			ckey = csp->csp_cipher_key;
		ctx = (struct safexcel_context_record *)req->ctx.vaddr;
		(void)safexcel_set_context(ctx, crp->crp_op, ckey, akey,
		    req->sess);
	} else {
		/*
		 * Use the context record template computed at session
		 * initialization time.
		 */
		ctxtmp = CRYPTO_OP_IS_ENCRYPT(crp->crp_op) ?
		    &req->sess->encctx : &req->sess->decctx;
		ctx = &ctxtmp->ctx;
		memcpy(req->ctx.vaddr + 2 * sizeof(uint32_t), ctx->data,
		    ctxtmp->len);
	}
	cdesc->control_data.control0 = ctx->control0;
	cdesc->control_data.control1 = ctx->control1;

	/*
	 * For keyless hash operations the token instructions are embedded
	 * directly in the command descriptor's token field.  Otherwise an
	 * additional token descriptor is allocated, and the embedded
	 * instruction space is used to store the IV instead.
	 */
	if (csp->csp_cipher_alg == 0 &&
	    csp->csp_auth_alg != CRYPTO_AES_NIST_GMAC) {
		instr = (void *)cdesc->control_data.token;
	} else {
		instr = (void *)(sc->sc_ring[ringidx].dma_atok.vaddr +
		    sc->sc_config.atok_offset *
		    (cdesc - sc->sc_ring[ringidx].cdr.desc));
		cdesc->control_data.options |= SAFEXCEL_OPTION_4_TOKEN_IV_CMD;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		safexcel_instr_gcm(req, instr, cdesc);
		break;
	case CRYPTO_AES_CCM_16:
		safexcel_instr_ccm(req, instr, cdesc);
		break;
	case CRYPTO_AES_XTS:
		memcpy(cdesc->control_data.token, req->iv, AES_XTS_IV_LEN);
		memset(cdesc->control_data.token +
		    AES_XTS_IV_LEN / sizeof(uint32_t), 0, AES_XTS_IV_LEN);

		safexcel_instr_cipher(req, instr, cdesc);
		break;
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		memcpy(cdesc->control_data.token, req->iv, AES_BLOCK_LEN);
		if (csp->csp_auth_alg != 0)
			safexcel_instr_eta(req, instr, cdesc);
		else
			safexcel_instr_cipher(req, instr, cdesc);
		break;
	default:
		switch (csp->csp_auth_alg) {
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512:
		case CRYPTO_SHA2_512_HMAC:
			safexcel_instr_sha_hash(req, instr);
			break;
		case CRYPTO_AES_NIST_GMAC:
			safexcel_instr_gmac(req, instr, cdesc);
			break;
		default:
			panic("unhandled auth request %d", csp->csp_auth_alg);
		}
		break;
	}
}

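/*
 * Reserve and initialize the next descriptor in the ring's result
 * descriptor ring (RDR).  Returns NULL if the ring is full, in which case
 * the caller must roll back any descriptors already reserved and retry.
 */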
static struct safexcel_res_descr *
safexcel_res_descr_add(struct safexcel_ring *ring, bool first, bool last,
    bus_addr_t data, uint32_t len)
{
	struct safexcel_res_descr *rdesc;
	struct safexcel_res_descr_ring *rring;

	mtx_assert(&ring->mtx, MA_OWNED);

	rring = &ring->rdr;
	if ((rring->write + 1) % SAFEXCEL_RING_SIZE == rring->read)
		return (NULL);

	rdesc = &rring->desc[rring->write];
	rring->write = (rring->write + 1) % SAFEXCEL_RING_SIZE;

	rdesc->particle_size = len;
	rdesc->rsvd0 = 0;
	rdesc->descriptor_overflow = 0;
	rdesc->buffer_overflow = 0;
	rdesc->last_seg = last;
	rdesc->first_seg = first;
	rdesc->result_size =
	    sizeof(struct safexcel_res_data) / sizeof(uint32_t);
	rdesc->rsvd1 = 0;
	rdesc->data_lo = SAFEXCEL_ADDR_LO(data);
	rdesc->data_hi = SAFEXCEL_ADDR_HI(data);

	if (first) {
		rdesc->result_data.packet_length = 0;
		rdesc->result_data.error_code = 0;
	}

	return (rdesc);
}

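/*
 * Reserve and initialize the next descriptor in the ring's command
 * descriptor ring (CDR).  Only the first descriptor of a request carries
 * the control data: the total packet length, the processing options and a
 * pointer to the context record.  Returns NULL if the ring is full.
 */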
static struct safexcel_cmd_descr *
safexcel_cmd_descr_add(struct safexcel_ring *ring, bool first, bool last,
    bus_addr_t data, uint32_t seglen, uint32_t reqlen, bus_addr_t context)
{
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_cmd_descr_ring *cring;

	KASSERT(reqlen <= SAFEXCEL_MAX_REQUEST_SIZE,
	    ("%s: request length %u too long", __func__, reqlen));
	mtx_assert(&ring->mtx, MA_OWNED);

	cring = &ring->cdr;
	if ((cring->write + 1) % SAFEXCEL_RING_SIZE == cring->read)
		return (NULL);

	cdesc = &cring->desc[cring->write];
	cring->write = (cring->write + 1) % SAFEXCEL_RING_SIZE;

	cdesc->particle_size = seglen;
	cdesc->rsvd0 = 0;
	cdesc->last_seg = last;
	cdesc->first_seg = first;
	cdesc->additional_cdata_size = 0;
	cdesc->rsvd1 = 0;
	cdesc->data_lo = SAFEXCEL_ADDR_LO(data);
	cdesc->data_hi = SAFEXCEL_ADDR_HI(data);
	if (first) {
		cdesc->control_data.packet_length = reqlen;
		cdesc->control_data.options = SAFEXCEL_OPTION_IP |
		    SAFEXCEL_OPTION_CP | SAFEXCEL_OPTION_CTX_CTRL_IN_CMD |
		    SAFEXCEL_OPTION_RC_AUTO;
		cdesc->control_data.type = SAFEXCEL_TOKEN_TYPE_BYPASS;
		cdesc->control_data.context_lo = SAFEXCEL_ADDR_LO(context) |
		    SAFEXCEL_CONTEXT_SMALL;
		cdesc->control_data.context_hi = SAFEXCEL_ADDR_HI(context);
	}

	return (cdesc);
}

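/*
 * Return command descriptors reserved by safexcel_cmd_descr_add() when
 * construction of a request fails partway through; its result descriptor
 * counterpart follows below.
 */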
static void
safexcel_cmd_descr_rollback(struct safexcel_ring *ring, int count)
{
	struct safexcel_cmd_descr_ring *cring;

	mtx_assert(&ring->mtx, MA_OWNED);

	cring = &ring->cdr;
	cring->write -= count;
	if (cring->write < 0)
		cring->write += SAFEXCEL_RING_SIZE;
}

static void
safexcel_res_descr_rollback(struct safexcel_ring *ring, int count)
{
	struct safexcel_res_descr_ring *rring;

	mtx_assert(&ring->mtx, MA_OWNED);

	rring = &ring->rdr;
	rring->write -= count;
	if (rring->write < 0)
		rring->write += SAFEXCEL_RING_SIZE;
}

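/*
 * Append the subrange [start, start + len) of the buffer described by the
 * busdma segment array to the given scatter/gather list, splitting
 * segments as needed.
 */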
static void
safexcel_append_segs(bus_dma_segment_t *segs, int nseg, struct sglist *sg,
    int start, int len)
{
	bus_dma_segment_t *seg;
	size_t seglen;
	int error, i;

	for (i = 0; i < nseg && len > 0; i++) {
		seg = &segs[i];

		if (seg->ds_len <= start) {
			start -= seg->ds_len;
			continue;
		}

		seglen = MIN(len, seg->ds_len - start);
		error = sglist_append_phys(sg, seg->ds_addr + start, seglen);
		if (error != 0)
			panic("%s: ran out of segments: %d", __func__, error);
		len -= seglen;
		start = 0;
	}
}

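/*
 * busdma load callback: carve the mapped crypto buffer into input
 * (command) and output (result) streams and allocate a hardware
 * descriptor for each segment.  Failures are recorded in req->error after
 * rolling back any descriptors already reserved.
 */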
static void
safexcel_create_chain_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error)
{
	const struct crypto_session_params *csp;
	struct cryptop *crp;
	struct safexcel_cmd_descr *cdesc;
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	struct safexcel_session *sess;
	struct sglist *sg;
	size_t inlen;
	int i;
	bool first, last;

	req = arg;
	if (error != 0) {
		req->error = error;
		return;
	}

	crp = req->crp;
	csp = crypto_get_params(crp->crp_session);
	sess = req->sess;
	ring = &req->sc->sc_ring[req->ringidx];

	mtx_assert(&ring->mtx, MA_OWNED);

	/*
	 * Set up descriptors for input and output data.
	 *
	 * The processing engine's programs require that any AAD come first,
	 * followed by the cipher payload, followed by the digest.  Some
	 * consumers place the digest first in the input buffer, in which case
	 * we have to create an extra descriptor.
	 *
	 * As an optimization, unmodified data is not passed to the output
	 * stream.
	 */
	sglist_reset(ring->cmd_data);
	sglist_reset(ring->res_data);
	if (crp->crp_aad_length != 0) {
		safexcel_append_segs(segs, nseg, ring->cmd_data,
		    crp->crp_aad_start, crp->crp_aad_length);
	}
	safexcel_append_segs(segs, nseg, ring->cmd_data,
	    crp->crp_payload_start, crp->crp_payload_length);
	if (csp->csp_cipher_alg != 0) {
		safexcel_append_segs(segs, nseg, ring->res_data,
		    crp->crp_payload_start, crp->crp_payload_length);
	}
	if (sess->digestlen > 0) {
		if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
			safexcel_append_segs(segs, nseg, ring->cmd_data,
			    crp->crp_digest_start, sess->digestlen);
		} else {
			safexcel_append_segs(segs, nseg, ring->res_data,
			    crp->crp_digest_start, sess->digestlen);
		}
	}

	sg = ring->cmd_data;
	if (sg->sg_nseg == 0) {
		/*
		 * Fake a segment for the command descriptor if the input has
		 * length zero.  The EIP97 apparently does not handle
		 * zero-length packets properly since subsequent requests return
		 * bogus errors, so provide a dummy segment using the context
		 * record's address.  Also, we must allocate at least one
		 * command ring entry per request to keep the request shadow
		 * ring in sync.
		 */
		(void)sglist_append_phys(sg, req->ctx.paddr, 1);
	}
	for (i = 0, inlen = 0; i < sg->sg_nseg; i++)
		inlen += sg->sg_segs[i].ss_len;
	for (i = 0; i < sg->sg_nseg; i++) {
		first = i == 0;
		last = i == sg->sg_nseg - 1;

		cdesc = safexcel_cmd_descr_add(ring, first, last,
		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len,
		    (uint32_t)inlen, req->ctx.paddr);
		if (cdesc == NULL) {
			safexcel_cmd_descr_rollback(ring, i);
			counter_u64_add(req->sc->sc_cdesc_alloc_failures, 1);
			req->error = ERESTART;
			return;
		}
		if (i == 0)
			req->cdesc = cdesc;
	}
	req->cdescs = sg->sg_nseg;

	sg = ring->res_data;
	if (sg->sg_nseg == 0) {
		/*
		 * We need a result descriptor even if the output stream will be
		 * empty, for example when verifying an AAD digest.
		 */
		sg->sg_segs[0].ss_paddr = 0;
		sg->sg_segs[0].ss_len = 0;
		sg->sg_nseg = 1;
	}
	for (i = 0; i < sg->sg_nseg; i++) {
		first = i == 0;
		last = i == sg->sg_nseg - 1;

		if (safexcel_res_descr_add(ring, first, last,
		    sg->sg_segs[i].ss_paddr, sg->sg_segs[i].ss_len) == NULL) {
			safexcel_cmd_descr_rollback(ring,
			    ring->cmd_data->sg_nseg);
			safexcel_res_descr_rollback(ring, i);
			counter_u64_add(req->sc->sc_rdesc_alloc_failures, 1);
			req->error = ERESTART;
			return;
		}
	}
	req->rdescs = sg->sg_nseg;
}

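/*
 * Load the request's crypto buffer for DMA and build its descriptor
 * chains via the callback above.  The ring lock must be held; a typical
 * call sequence, as in the dispatch path below, is:
 *
 *	mtx_lock(&ring->mtx);
 *	error = safexcel_create_chain(ring, req);
 *	...
 *	mtx_unlock(&ring->mtx);
 */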
static int
safexcel_create_chain(struct safexcel_ring *ring, struct safexcel_request *req)
{
	int error;

	req->error = 0;
	req->cdescs = req->rdescs = 0;

	error = bus_dmamap_load_crp(ring->data_dtag, req->dmap, req->crp,
	    safexcel_create_chain_cb, req, BUS_DMA_NOWAIT);
	if (error == 0)
		req->dmap_loaded = true;

	if (req->error != 0)
		error = req->error;

	return (error);
}

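/*
 * Check whether the cipher parameters describe one of the supported AES
 * modes with the expected IV length.
 */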
static bool
safexcel_probe_cipher(const struct crypto_session_params *csp)
{
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
	case CRYPTO_AES_ICM:
		if (csp->csp_ivlen != AES_BLOCK_LEN)
			return (false);
		break;
	case CRYPTO_AES_XTS:
		if (csp->csp_ivlen != AES_XTS_IV_LEN)
			return (false);
		break;
	default:
		return (false);
	}

	return (true);
}

/*
 * Determine whether the driver can implement a session with the requested
 * parameters.
 */
static int
safexcel_probesession(device_t dev, const struct crypto_session_params *csp)
{
	if (csp->csp_flags != 0)
		return (EINVAL);

	switch (csp->csp_mode) {
	case CSP_MODE_CIPHER:
		if (!safexcel_probe_cipher(csp))
			return (EINVAL);
		break;
	case CSP_MODE_DIGEST:
		switch (csp->csp_auth_alg) {
		case CRYPTO_AES_NIST_GMAC:
			if (csp->csp_ivlen != AES_GCM_IV_LEN)
				return (EINVAL);
			break;
		case CRYPTO_SHA1:
		case CRYPTO_SHA1_HMAC:
		case CRYPTO_SHA2_224:
		case CRYPTO_SHA2_224_HMAC:
		case CRYPTO_SHA2_256:
		case CRYPTO_SHA2_256_HMAC:
		case CRYPTO_SHA2_384:
		case CRYPTO_SHA2_384_HMAC:
		case CRYPTO_SHA2_512:
		case CRYPTO_SHA2_512_HMAC:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_AEAD:
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_NIST_GCM_16:
		case CRYPTO_AES_CCM_16:
			break;
		default:
			return (EINVAL);
		}
		break;
	case CSP_MODE_ETA:
		if (!safexcel_probe_cipher(csp))
			return (EINVAL);
		switch (csp->csp_cipher_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_ICM:
			/*
			 * The EIP-97 does not support combining AES-XTS with
			 * hash operations.
			 */
			if (csp->csp_auth_alg != CRYPTO_SHA1_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_224_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_256_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_384_HMAC &&
			    csp->csp_auth_alg != CRYPTO_SHA2_512_HMAC)
				return (EINVAL);
			break;
		default:
			return (EINVAL);
		}
		break;
	default:
		return (EINVAL);
	}

	return (CRYPTODEV_PROBE_HARDWARE);
}

static uint32_t
safexcel_aes_algid(int keylen)
{
	switch (keylen) {
	case 16:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES128);
	case 24:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES192);
	case 32:
		return (SAFEXCEL_CONTROL0_CRYPTO_ALG_AES256);
	default:
		panic("invalid AES key length %d", keylen);
	}
}

static uint32_t
safexcel_aes_ccm_hashid(int keylen)
{
	switch (keylen) {
	case 16:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC128);
	case 24:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC192);
	case 32:
		return (SAFEXCEL_CONTROL0_HASH_ALG_XCBC256);
	default:
		panic("invalid AES key length %d", keylen);
	}
}

static uint32_t
safexcel_sha_hashid(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA1);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA224);
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA256);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA384);
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SAFEXCEL_CONTROL0_HASH_ALG_SHA512);
	default:
		__assert_unreachable();
	}
}

static int
safexcel_sha_hashlen(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SHA1_HASH_LEN);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
		return (SHA2_224_HASH_LEN);
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SHA2_256_HASH_LEN);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
		return (SHA2_384_HASH_LEN);
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SHA2_512_HASH_LEN);
	default:
		__assert_unreachable();
	}
}

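/*
 * Size of the intermediate hash state that must be saved and restored.
 * SHA-224 and SHA-384 are truncations of SHA-256 and SHA-512 respectively,
 * so their intermediate state is the full-width state of the wider
 * algorithm.
 */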
static int
safexcel_sha_statelen(int alg)
{
	switch (alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA1_HMAC:
		return (SHA1_HASH_LEN);
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_256_HMAC:
		return (SHA2_256_HASH_LEN);
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512:
	case CRYPTO_SHA2_512_HMAC:
		return (SHA2_512_HASH_LEN);
	default:
		__assert_unreachable();
	}
}

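/*
 * Initialize driver-specific session state and precompute context record
 * templates for both directions, so that requests which do not carry
 * per-request keys can reuse them in safexcel_set_token().
 */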
static int
safexcel_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct safexcel_session *sess;

	sess = crypto_get_driver_session(cses);
	sess->cses = cses;

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1:
	case CRYPTO_SHA2_224:
	case CRYPTO_SHA2_256:
	case CRYPTO_SHA2_384:
	case CRYPTO_SHA2_512:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_PRECOMPUTED;
		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
		break;
	case CRYPTO_SHA1_HMAC:
	case CRYPTO_SHA2_224_HMAC:
	case CRYPTO_SHA2_256_HMAC:
	case CRYPTO_SHA2_384_HMAC:
	case CRYPTO_SHA2_512_HMAC:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_HMAC;
		sess->hash = safexcel_sha_hashid(csp->csp_auth_alg);
		sess->digestlen = safexcel_sha_hashlen(csp->csp_auth_alg);
		sess->statelen = safexcel_sha_statelen(csp->csp_auth_alg);
		break;
	case CRYPTO_AES_NIST_GMAC:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
		sess->digestlen = GMAC_DIGEST_LEN;
		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
		sess->alg = safexcel_aes_algid(csp->csp_auth_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
		break;
	}

	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_NIST_GCM_16:
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_GMAC;
		sess->digestlen = GMAC_DIGEST_LEN;
		sess->hash = SAFEXCEL_CONTROL0_HASH_ALG_GHASH;
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_GCM;
		break;
	case CRYPTO_AES_CCM_16:
		sess->hash = safexcel_aes_ccm_hashid(csp->csp_cipher_klen);
		sess->digest = SAFEXCEL_CONTROL0_DIGEST_CCM;
		sess->digestlen = CCM_CBC_MAX_DIGEST_LEN;
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CCM;
		break;
	case CRYPTO_AES_CBC:
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CBC;
		break;
	case CRYPTO_AES_ICM:
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_CTR;
		break;
	case CRYPTO_AES_XTS:
		sess->alg = safexcel_aes_algid(csp->csp_cipher_klen / 2);
		sess->mode = SAFEXCEL_CONTROL1_CRYPTO_MODE_XTS;
		break;
	}

	if (csp->csp_auth_mlen != 0)
		sess->digestlen = csp->csp_auth_mlen;

	sess->encctx.len = safexcel_set_context(&sess->encctx.ctx,
	    CRYPTO_OP_ENCRYPT, csp->csp_cipher_key, csp->csp_auth_key,
	    sess);
	sess->decctx.len = safexcel_set_context(&sess->decctx.ctx,
	    CRYPTO_OP_DECRYPT, csp->csp_cipher_key, csp->csp_auth_key,
	    sess);

	return (0);
}

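/*
 * Dispatch a symmetric crypto request.  Requests are spread across rings
 * by submitting CPU, with each ring serialized by its own mutex; ERESTART
 * asks the framework to hold further requests until ring resources are
 * freed by the completion path.
 */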
static int
safexcel_process(device_t dev, struct cryptop *crp, int hint)
{
	struct safexcel_request *req;
	struct safexcel_ring *ring;
	struct safexcel_session *sess;
	struct safexcel_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sess = crypto_get_driver_session(crp->crp_session);

	if (__predict_false(crypto_buffer_len(&crp->crp_buf) >
	    SAFEXCEL_MAX_REQUEST_SIZE)) {
		crp->crp_etype = E2BIG;
		crypto_done(crp);
		return (0);
	}

	ring = &sc->sc_ring[curcpu % sc->sc_config.rings];
	mtx_lock(&ring->mtx);
	req = safexcel_alloc_request(sc, ring);
	if (__predict_false(req == NULL)) {
		ring->blocked = CRYPTO_SYMQ;
		mtx_unlock(&ring->mtx);
		counter_u64_add(sc->sc_req_alloc_failures, 1);
		return (ERESTART);
	}

	req->crp = crp;
	req->sess = sess;

	crypto_read_iv(crp, req->iv);

	error = safexcel_create_chain(ring, req);
	if (__predict_false(error != 0)) {
		safexcel_free_request(ring, req);
		if (error == ERESTART)
			ring->blocked = CRYPTO_SYMQ;
		mtx_unlock(&ring->mtx);
		if (error != ERESTART) {
			crp->crp_etype = error;
			crypto_done(crp);
			return (0);
		} else {
			return (ERESTART);
		}
	}

	safexcel_set_token(req);

	bus_dmamap_sync(ring->data_dtag, req->dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(req->ctx.tag, req->ctx.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->cdr.dma.tag, ring->cdr.dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->dma_atok.tag, ring->dma_atok.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ring->rdr.dma.tag, ring->rdr.dma.map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	safexcel_execute(sc, ring, req, hint);

	mtx_unlock(&ring->mtx);

	return (0);
}

static device_method_t safexcel_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		safexcel_probe),
	DEVMETHOD(device_attach,	safexcel_attach),
	DEVMETHOD(device_detach,	safexcel_detach),

	/* Cryptodev interface */
	DEVMETHOD(cryptodev_probesession, safexcel_probesession),
	DEVMETHOD(cryptodev_newsession,	safexcel_newsession),
	DEVMETHOD(cryptodev_process,	safexcel_process),

	DEVMETHOD_END
};

static driver_t safexcel_driver = {
	.name		= "safexcel",
	.methods	= safexcel_methods,
	.size		= sizeof(struct safexcel_softc),
};

DRIVER_MODULE(safexcel, simplebus, safexcel_driver, 0, 0);
MODULE_VERSION(safexcel, 1);
MODULE_DEPEND(safexcel, crypto, 1, 1, 1);