1 /* SPDX-License-Identifier: BSD-2-Clause-NetBSD AND BSD-3-Clause */
2 /* $NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $ */
3
4 /*
5 * Copyright (c) 2019 Internet Initiative Japan, Inc.
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
18 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
19 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
27 * POSSIBILITY OF SUCH DAMAGE.
28 */
29
30 /*
31 * Copyright(c) 2007-2019 Intel Corporation. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 *
37 * * Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * * Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in
41 * the documentation and/or other materials provided with the
42 * distribution.
43 * * Neither the name of Intel Corporation nor the names of its
44 * contributors may be used to endorse or promote products derived
45 * from this software without specific prior written permission.
46 *
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
48 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
49 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
50 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
51 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
52 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
53 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
54 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
55 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
56 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
57 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
58 */
59
60 #include <sys/cdefs.h>
61 __FBSDID("$FreeBSD$");
62 #if 0
63 __KERNEL_RCSID(0, "$NetBSD: qat.c,v 1.6 2020/06/14 23:23:12 riastradh Exp $");
64 #endif
65
66 #include <sys/param.h>
67 #include <sys/systm.h>
68 #include <sys/bus.h>
69 #include <sys/cpu.h>
70 #include <sys/firmware.h>
71 #include <sys/kernel.h>
72 #include <sys/mbuf.h>
73 #include <sys/md5.h>
74 #include <sys/module.h>
75 #include <sys/mutex.h>
76 #include <sys/smp.h>
77 #include <sys/sysctl.h>
78 #include <sys/rman.h>
79
80 #include <machine/bus.h>
81
82 #include <opencrypto/cryptodev.h>
83 #include <opencrypto/xform.h>
84
85 #include "cryptodev_if.h"
86
87 #include <dev/pci/pcireg.h>
88 #include <dev/pci/pcivar.h>
89
90 #include "qatreg.h"
91 #include "qatvar.h"
92 #include "qat_aevar.h"
93
94 extern struct qat_hw qat_hw_c2xxx;
95
96 #define PCI_VENDOR_INTEL 0x8086
97 #define PCI_PRODUCT_INTEL_C2000_IQIA_PHYS 0x1f18
98
99 static const struct qat_product {
100 uint16_t qatp_vendor;
101 uint16_t qatp_product;
102 const char *qatp_name;
103 enum qat_chip_type qatp_chip;
104 const struct qat_hw *qatp_hw;
105 } qat_products[] = {
106 { PCI_VENDOR_INTEL, PCI_PRODUCT_INTEL_C2000_IQIA_PHYS,
107 "Intel C2000 QuickAssist PF",
108 QAT_CHIP_C2XXX, &qat_hw_c2xxx },
109 { 0, 0, NULL, 0, NULL },
110 };
111
/* Hash algorithm-specific structures */
113
/* SHA1 - 20 bytes - Initialiser state can be found in FIPS 180-2 */
115 static const uint8_t sha1_initial_state[QAT_HASH_SHA1_STATE_SIZE] = {
116 0x67, 0x45, 0x23, 0x01,
117 0xef, 0xcd, 0xab, 0x89,
118 0x98, 0xba, 0xdc, 0xfe,
119 0x10, 0x32, 0x54, 0x76,
120 0xc3, 0xd2, 0xe1, 0xf0
121 };
122
/* SHA 256 - 32 bytes - Initialiser state can be found in FIPS 180-2 */
124 static const uint8_t sha256_initial_state[QAT_HASH_SHA256_STATE_SIZE] = {
125 0x6a, 0x09, 0xe6, 0x67,
126 0xbb, 0x67, 0xae, 0x85,
127 0x3c, 0x6e, 0xf3, 0x72,
128 0xa5, 0x4f, 0xf5, 0x3a,
129 0x51, 0x0e, 0x52, 0x7f,
130 0x9b, 0x05, 0x68, 0x8c,
131 0x1f, 0x83, 0xd9, 0xab,
132 0x5b, 0xe0, 0xcd, 0x19
133 };
134
/* SHA 384 - 64 bytes - Initialiser state can be found in FIPS 180-2 */
136 static const uint8_t sha384_initial_state[QAT_HASH_SHA384_STATE_SIZE] = {
137 0xcb, 0xbb, 0x9d, 0x5d, 0xc1, 0x05, 0x9e, 0xd8,
138 0x62, 0x9a, 0x29, 0x2a, 0x36, 0x7c, 0xd5, 0x07,
139 0x91, 0x59, 0x01, 0x5a, 0x30, 0x70, 0xdd, 0x17,
140 0x15, 0x2f, 0xec, 0xd8, 0xf7, 0x0e, 0x59, 0x39,
141 0x67, 0x33, 0x26, 0x67, 0xff, 0xc0, 0x0b, 0x31,
142 0x8e, 0xb4, 0x4a, 0x87, 0x68, 0x58, 0x15, 0x11,
143 0xdb, 0x0c, 0x2e, 0x0d, 0x64, 0xf9, 0x8f, 0xa7,
144 0x47, 0xb5, 0x48, 0x1d, 0xbe, 0xfa, 0x4f, 0xa4
145 };
146
/* SHA 512 - 64 bytes - Initialiser state can be found in FIPS 180-2 */
148 static const uint8_t sha512_initial_state[QAT_HASH_SHA512_STATE_SIZE] = {
149 0x6a, 0x09, 0xe6, 0x67, 0xf3, 0xbc, 0xc9, 0x08,
150 0xbb, 0x67, 0xae, 0x85, 0x84, 0xca, 0xa7, 0x3b,
151 0x3c, 0x6e, 0xf3, 0x72, 0xfe, 0x94, 0xf8, 0x2b,
152 0xa5, 0x4f, 0xf5, 0x3a, 0x5f, 0x1d, 0x36, 0xf1,
153 0x51, 0x0e, 0x52, 0x7f, 0xad, 0xe6, 0x82, 0xd1,
154 0x9b, 0x05, 0x68, 0x8c, 0x2b, 0x3e, 0x6c, 0x1f,
155 0x1f, 0x83, 0xd9, 0xab, 0xfb, 0x41, 0xbd, 0x6b,
156 0x5b, 0xe0, 0xcd, 0x19, 0x13, 0x7e, 0x21, 0x79
157 };
158
159 static const struct qat_sym_hash_alg_info sha1_info = {
160 .qshai_digest_len = QAT_HASH_SHA1_DIGEST_SIZE,
161 .qshai_block_len = QAT_HASH_SHA1_BLOCK_SIZE,
162 .qshai_state_size = QAT_HASH_SHA1_STATE_SIZE,
163 .qshai_init_state = sha1_initial_state,
164 .qshai_sah = &auth_hash_hmac_sha1,
165 .qshai_state_offset = 0,
166 .qshai_state_word = 4,
167 };
168
169 static const struct qat_sym_hash_alg_info sha256_info = {
170 .qshai_digest_len = QAT_HASH_SHA256_DIGEST_SIZE,
171 .qshai_block_len = QAT_HASH_SHA256_BLOCK_SIZE,
172 .qshai_state_size = QAT_HASH_SHA256_STATE_SIZE,
173 .qshai_init_state = sha256_initial_state,
174 .qshai_sah = &auth_hash_hmac_sha2_256,
175 .qshai_state_offset = offsetof(SHA256_CTX, state),
176 .qshai_state_word = 4,
177 };
178
179 static const struct qat_sym_hash_alg_info sha384_info = {
180 .qshai_digest_len = QAT_HASH_SHA384_DIGEST_SIZE,
181 .qshai_block_len = QAT_HASH_SHA384_BLOCK_SIZE,
182 .qshai_state_size = QAT_HASH_SHA384_STATE_SIZE,
183 .qshai_init_state = sha384_initial_state,
184 .qshai_sah = &auth_hash_hmac_sha2_384,
185 .qshai_state_offset = offsetof(SHA384_CTX, state),
186 .qshai_state_word = 8,
187 };
188
189 static const struct qat_sym_hash_alg_info sha512_info = {
190 .qshai_digest_len = QAT_HASH_SHA512_DIGEST_SIZE,
191 .qshai_block_len = QAT_HASH_SHA512_BLOCK_SIZE,
192 .qshai_state_size = QAT_HASH_SHA512_STATE_SIZE,
193 .qshai_init_state = sha512_initial_state,
194 .qshai_sah = &auth_hash_hmac_sha2_512,
195 .qshai_state_offset = offsetof(SHA512_CTX, state),
196 .qshai_state_word = 8,
197 };
198
199 static const struct qat_sym_hash_alg_info aes_gcm_info = {
200 .qshai_digest_len = QAT_HASH_AES_GCM_DIGEST_SIZE,
201 .qshai_block_len = QAT_HASH_AES_GCM_BLOCK_SIZE,
202 .qshai_state_size = QAT_HASH_AES_GCM_STATE_SIZE,
203 .qshai_sah = &auth_hash_nist_gmac_aes_128,
204 };
205
/* QAT-specific hash configuration structures */
207
208 static const struct qat_sym_hash_qat_info sha1_config = {
209 .qshqi_algo_enc = HW_AUTH_ALGO_SHA1,
210 .qshqi_auth_counter = QAT_HASH_SHA1_BLOCK_SIZE,
211 .qshqi_state1_len = HW_SHA1_STATE1_SZ,
212 .qshqi_state2_len = HW_SHA1_STATE2_SZ,
213 };
214
215 static const struct qat_sym_hash_qat_info sha256_config = {
216 .qshqi_algo_enc = HW_AUTH_ALGO_SHA256,
217 .qshqi_auth_counter = QAT_HASH_SHA256_BLOCK_SIZE,
218 .qshqi_state1_len = HW_SHA256_STATE1_SZ,
219 .qshqi_state2_len = HW_SHA256_STATE2_SZ
220 };
221
222 static const struct qat_sym_hash_qat_info sha384_config = {
223 .qshqi_algo_enc = HW_AUTH_ALGO_SHA384,
224 .qshqi_auth_counter = QAT_HASH_SHA384_BLOCK_SIZE,
225 .qshqi_state1_len = HW_SHA384_STATE1_SZ,
226 .qshqi_state2_len = HW_SHA384_STATE2_SZ
227 };
228
229 static const struct qat_sym_hash_qat_info sha512_config = {
230 .qshqi_algo_enc = HW_AUTH_ALGO_SHA512,
231 .qshqi_auth_counter = QAT_HASH_SHA512_BLOCK_SIZE,
232 .qshqi_state1_len = HW_SHA512_STATE1_SZ,
233 .qshqi_state2_len = HW_SHA512_STATE2_SZ
234 };
235
236 static const struct qat_sym_hash_qat_info aes_gcm_config = {
237 .qshqi_algo_enc = HW_AUTH_ALGO_GALOIS_128,
238 .qshqi_auth_counter = QAT_HASH_AES_GCM_BLOCK_SIZE,
239 .qshqi_state1_len = HW_GALOIS_128_STATE1_SZ,
240 .qshqi_state2_len =
241 HW_GALOIS_H_SZ + HW_GALOIS_LEN_A_SZ + HW_GALOIS_E_CTR0_SZ,
242 };
243
244 static const struct qat_sym_hash_def qat_sym_hash_defs[] = {
245 [QAT_SYM_HASH_SHA1] = { &sha1_info, &sha1_config },
246 [QAT_SYM_HASH_SHA256] = { &sha256_info, &sha256_config },
247 [QAT_SYM_HASH_SHA384] = { &sha384_info, &sha384_config },
248 [QAT_SYM_HASH_SHA512] = { &sha512_info, &sha512_config },
249 [QAT_SYM_HASH_AES_GCM] = { &aes_gcm_info, &aes_gcm_config },
250 };
251
252 static const struct qat_product *qat_lookup(device_t);
253 static int qat_probe(device_t);
254 static int qat_attach(device_t);
255 static int qat_init(device_t);
256 static int qat_start(device_t);
257 static int qat_detach(device_t);
258
259 static int qat_newsession(device_t dev, crypto_session_t cses,
260 const struct crypto_session_params *csp);
261 static void qat_freesession(device_t dev, crypto_session_t cses);
262
263 static int qat_setup_msix_intr(struct qat_softc *);
264
265 static void qat_etr_init(struct qat_softc *);
266 static void qat_etr_deinit(struct qat_softc *);
267 static void qat_etr_bank_init(struct qat_softc *, int);
268 static void qat_etr_bank_deinit(struct qat_softc *sc, int);
269
270 static void qat_etr_ap_bank_init(struct qat_softc *);
271 static void qat_etr_ap_bank_set_ring_mask(uint32_t *, uint32_t, int);
272 static void qat_etr_ap_bank_set_ring_dest(struct qat_softc *, uint32_t *,
273 uint32_t, int);
274 static void qat_etr_ap_bank_setup_ring(struct qat_softc *,
275 struct qat_ring *);
276 static int qat_etr_verify_ring_size(uint32_t, uint32_t);
277
278 static int qat_etr_ring_intr(struct qat_softc *, struct qat_bank *,
279 struct qat_ring *);
280 static void qat_etr_bank_intr(void *);
281
282 static void qat_arb_update(struct qat_softc *, struct qat_bank *);
283
284 static struct qat_sym_cookie *qat_crypto_alloc_sym_cookie(
285 struct qat_crypto_bank *);
286 static void qat_crypto_free_sym_cookie(struct qat_crypto_bank *,
287 struct qat_sym_cookie *);
288 static int qat_crypto_setup_ring(struct qat_softc *,
289 struct qat_crypto_bank *);
290 static int qat_crypto_bank_init(struct qat_softc *,
291 struct qat_crypto_bank *);
292 static int qat_crypto_init(struct qat_softc *);
293 static void qat_crypto_deinit(struct qat_softc *);
294 static int qat_crypto_start(struct qat_softc *);
295 static void qat_crypto_stop(struct qat_softc *);
296 static int qat_crypto_sym_rxintr(struct qat_softc *, void *, void *);
297
298 static MALLOC_DEFINE(M_QAT, "qat", "Intel QAT driver");
299
300 static const struct qat_product *
301 qat_lookup(device_t dev)
302 {
303 const struct qat_product *qatp;
304
305 for (qatp = qat_products; qatp->qatp_name != NULL; qatp++) {
306 if (pci_get_vendor(dev) == qatp->qatp_vendor &&
307 pci_get_device(dev) == qatp->qatp_product)
308 return qatp;
309 }
310 return NULL;
311 }
312
313 static int
314 qat_probe(device_t dev)
315 {
316 const struct qat_product *prod;
317
318 prod = qat_lookup(dev);
319 if (prod != NULL) {
320 device_set_desc(dev, prod->qatp_name);
321 return BUS_PROBE_DEFAULT;
322 }
323 return ENXIO;
324 }
325
326 static int
327 qat_attach(device_t dev)
328 {
329 struct qat_softc *sc = device_get_softc(dev);
330 const struct qat_product *qatp;
331 int bar, count, error, i;
332
333 sc->sc_dev = dev;
334 sc->sc_rev = pci_get_revid(dev);
335 sc->sc_crypto.qcy_cid = -1;
336
337 qatp = qat_lookup(dev);
338 memcpy(&sc->sc_hw, qatp->qatp_hw, sizeof(struct qat_hw));
339
340 /* Determine active accelerators and engines */
341 sc->sc_accel_mask = sc->sc_hw.qhw_get_accel_mask(sc);
342 sc->sc_ae_mask = sc->sc_hw.qhw_get_ae_mask(sc);
343
344 sc->sc_accel_num = 0;
345 for (i = 0; i < sc->sc_hw.qhw_num_accel; i++) {
346 if (sc->sc_accel_mask & (1 << i))
347 sc->sc_accel_num++;
348 }
349 sc->sc_ae_num = 0;
350 for (i = 0; i < sc->sc_hw.qhw_num_engines; i++) {
351 if (sc->sc_ae_mask & (1 << i))
352 sc->sc_ae_num++;
353 }
354
355 if (!sc->sc_accel_mask || (sc->sc_ae_mask & 0x01) == 0) {
device_printf(sc->sc_dev, "couldn't find acceleration\n");
357 goto fail;
358 }
359
360 MPASS(sc->sc_accel_num <= MAX_NUM_ACCEL);
361 MPASS(sc->sc_ae_num <= MAX_NUM_AE);
362
363 /* Determine SKU and capabilities */
364 sc->sc_sku = sc->sc_hw.qhw_get_sku(sc);
365 sc->sc_accel_cap = sc->sc_hw.qhw_get_accel_cap(sc);
366 sc->sc_fw_uof_name = sc->sc_hw.qhw_get_fw_uof_name(sc);
367
368 i = 0;
369 if (sc->sc_hw.qhw_sram_bar_id != NO_PCI_REG) {
370 MPASS(sc->sc_hw.qhw_sram_bar_id == 0);
371 uint32_t fusectl = pci_read_config(dev, FUSECTL_REG, 4);
372 /* Skip SRAM BAR */
373 i = (fusectl & FUSECTL_MASK) ? 1 : 0;
374 }
375 for (bar = 0; bar < PCIR_MAX_BAR_0; bar++) {
376 uint32_t val = pci_read_config(dev, PCIR_BAR(bar), 4);
377 if (val == 0 || !PCI_BAR_MEM(val))
378 continue;
379
380 sc->sc_rid[i] = PCIR_BAR(bar);
381 sc->sc_res[i] = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
382 &sc->sc_rid[i], RF_ACTIVE);
383 if (sc->sc_res[i] == NULL) {
384 device_printf(dev, "couldn't map BAR %d\n", bar);
385 goto fail;
386 }
387
388 sc->sc_csrt[i] = rman_get_bustag(sc->sc_res[i]);
389 sc->sc_csrh[i] = rman_get_bushandle(sc->sc_res[i]);
390
391 i++;
392 if ((val & PCIM_BAR_MEM_TYPE) == PCIM_BAR_MEM_64)
393 bar++;
394 }
395
396 pci_enable_busmaster(dev);
397
398 count = sc->sc_hw.qhw_num_banks + 1;
399 if (pci_msix_count(dev) < count) {
400 device_printf(dev, "insufficient MSI-X vectors (%d vs. %d)\n",
401 pci_msix_count(dev), count);
402 goto fail;
403 }
404 error = pci_alloc_msix(dev, &count);
405 if (error != 0) {
406 device_printf(dev, "failed to allocate MSI-X vectors\n");
407 goto fail;
408 }
409
410 error = qat_init(dev);
411 if (error == 0)
412 return 0;
413
414 fail:
415 qat_detach(dev);
416 return ENXIO;
417 }
418
419 static int
420 qat_init(device_t dev)
421 {
422 struct qat_softc *sc = device_get_softc(dev);
423 int error;
424
425 qat_etr_init(sc);
426
427 if (sc->sc_hw.qhw_init_admin_comms != NULL &&
428 (error = sc->sc_hw.qhw_init_admin_comms(sc)) != 0) {
429 device_printf(sc->sc_dev,
430 "Could not initialize admin comms: %d\n", error);
431 return error;
432 }
433
434 if (sc->sc_hw.qhw_init_arb != NULL &&
435 (error = sc->sc_hw.qhw_init_arb(sc)) != 0) {
436 device_printf(sc->sc_dev,
437 "Could not initialize hw arbiter: %d\n", error);
438 return error;
439 }
440
441 error = qat_ae_init(sc);
442 if (error) {
443 device_printf(sc->sc_dev,
444 "Could not initialize Acceleration Engine: %d\n", error);
445 return error;
446 }
447
448 error = qat_aefw_load(sc);
449 if (error) {
450 device_printf(sc->sc_dev,
451 "Could not load firmware: %d\n", error);
452 return error;
453 }
454
455 error = qat_setup_msix_intr(sc);
456 if (error) {
457 device_printf(sc->sc_dev,
458 "Could not setup interrupts: %d\n", error);
459 return error;
460 }
461
462 sc->sc_hw.qhw_enable_intr(sc);
463
464 error = qat_crypto_init(sc);
465 if (error) {
466 device_printf(sc->sc_dev,
467 "Could not initialize service: %d\n", error);
468 return error;
469 }
470
471 if (sc->sc_hw.qhw_enable_error_correction != NULL)
472 sc->sc_hw.qhw_enable_error_correction(sc);
473
474 if (sc->sc_hw.qhw_set_ssm_wdtimer != NULL &&
475 (error = sc->sc_hw.qhw_set_ssm_wdtimer(sc)) != 0) {
476 device_printf(sc->sc_dev,
477 "Could not initialize watchdog timer: %d\n", error);
478 return error;
479 }
480
481 error = qat_start(dev);
482 if (error) {
483 device_printf(sc->sc_dev,
484 "Could not start: %d\n", error);
485 return error;
486 }
487
488 return 0;
489 }
490
491 static int
492 qat_start(device_t dev)
493 {
494 struct qat_softc *sc = device_get_softc(dev);
495 int error;
496
497 error = qat_ae_start(sc);
498 if (error)
499 return error;
500
501 if (sc->sc_hw.qhw_send_admin_init != NULL &&
502 (error = sc->sc_hw.qhw_send_admin_init(sc)) != 0) {
503 return error;
504 }
505
506 error = qat_crypto_start(sc);
507 if (error)
508 return error;
509
510 return 0;
511 }
512
513 static int
514 qat_detach(device_t dev)
515 {
516 struct qat_softc *sc;
517 int bar, i;
518
519 sc = device_get_softc(dev);
520
521 qat_crypto_stop(sc);
522 qat_crypto_deinit(sc);
523 qat_aefw_unload(sc);
524
525 if (sc->sc_etr_banks != NULL) {
526 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++) {
527 struct qat_bank *qb = &sc->sc_etr_banks[i];
528
529 if (qb->qb_ih_cookie != NULL)
530 (void)bus_teardown_intr(dev, qb->qb_ih,
531 qb->qb_ih_cookie);
532 if (qb->qb_ih != NULL)
533 (void)bus_release_resource(dev, SYS_RES_IRQ,
534 i + 1, qb->qb_ih);
535 }
536 }
537 if (sc->sc_ih_cookie != NULL) {
538 (void)bus_teardown_intr(dev, sc->sc_ih, sc->sc_ih_cookie);
539 sc->sc_ih_cookie = NULL;
540 }
541 if (sc->sc_ih != NULL) {
542 (void)bus_release_resource(dev, SYS_RES_IRQ,
543 sc->sc_hw.qhw_num_banks + 1, sc->sc_ih);
544 sc->sc_ih = NULL;
545 }
546 pci_release_msi(dev);
547
548 qat_etr_deinit(sc);
549
550 for (bar = 0; bar < MAX_BARS; bar++) {
551 if (sc->sc_res[bar] != NULL) {
552 (void)bus_release_resource(dev, SYS_RES_MEMORY,
553 sc->sc_rid[bar], sc->sc_res[bar]);
554 sc->sc_res[bar] = NULL;
555 }
556 }
557
558 return 0;
559 }
560
561 void *
562 qat_alloc_mem(size_t size)
563 {
564 return (malloc(size, M_QAT, M_WAITOK | M_ZERO));
565 }
566
567 void
568 qat_free_mem(void *ptr)
569 {
570 free(ptr, M_QAT);
571 }
572
573 static void
574 qat_alloc_dmamem_cb(void *arg, bus_dma_segment_t *segs, int nseg,
575 int error)
576 {
577 struct qat_dmamem *qdm;
578
579 if (error != 0)
580 return;
581
582 KASSERT(nseg == 1, ("%s: nsegs is %d", __func__, nseg));
583 qdm = arg;
584 qdm->qdm_dma_seg = segs[0];
585 }
586
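/*
 * Allocate a DMA-safe memory region of "size" bytes with the requested
 * alignment.  The region is loaded as a single segment; the tag, map,
 * kernel virtual address and bus segment are recorded in "qdm" and are
 * released later by qat_free_dmamem().
 */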
587 int
588 qat_alloc_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm,
589 int nseg, bus_size_t size, bus_size_t alignment)
590 {
591 int error;
592
593 KASSERT(qdm->qdm_dma_vaddr == NULL,
594 ("%s: DMA memory descriptor in use", __func__));
595
596 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
597 alignment, 0, /* alignment, boundary */
598 BUS_SPACE_MAXADDR, /* lowaddr */
599 BUS_SPACE_MAXADDR, /* highaddr */
600 NULL, NULL, /* filter, filterarg */
601 size, /* maxsize */
602 nseg, /* nsegments */
603 size, /* maxsegsize */
604 BUS_DMA_COHERENT, /* flags */
605 NULL, NULL, /* lockfunc, lockarg */
606 &qdm->qdm_dma_tag);
607 if (error != 0)
608 return error;
609
610 error = bus_dmamem_alloc(qdm->qdm_dma_tag, &qdm->qdm_dma_vaddr,
611 BUS_DMA_NOWAIT | BUS_DMA_ZERO | BUS_DMA_COHERENT,
612 &qdm->qdm_dma_map);
613 if (error != 0) {
614 device_printf(sc->sc_dev,
615 "couldn't allocate dmamem, error = %d\n", error);
616 goto fail_0;
617 }
618
619 error = bus_dmamap_load(qdm->qdm_dma_tag, qdm->qdm_dma_map,
620 qdm->qdm_dma_vaddr, size, qat_alloc_dmamem_cb, qdm,
621 BUS_DMA_NOWAIT);
622 if (error) {
623 device_printf(sc->sc_dev,
624 "couldn't load dmamem map, error = %d\n", error);
625 goto fail_1;
626 }
627
628 return 0;
629 fail_1:
630 bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr, qdm->qdm_dma_map);
631 fail_0:
632 bus_dma_tag_destroy(qdm->qdm_dma_tag);
633 return error;
634 }
635
636 void
637 qat_free_dmamem(struct qat_softc *sc, struct qat_dmamem *qdm)
638 {
639 if (qdm->qdm_dma_tag != NULL) {
640 bus_dmamap_unload(qdm->qdm_dma_tag, qdm->qdm_dma_map);
641 bus_dmamem_free(qdm->qdm_dma_tag, qdm->qdm_dma_vaddr,
642 qdm->qdm_dma_map);
643 bus_dma_tag_destroy(qdm->qdm_dma_tag);
644 explicit_bzero(qdm, sizeof(*qdm));
645 }
646 }
647
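/*
 * Set up one MSI-X vector per transport ring bank, binding each handler
 * to a CPU in round-robin fashion, plus one additional vector for the
 * acceleration engine cluster interrupt.
 */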
648 static int
649 qat_setup_msix_intr(struct qat_softc *sc)
650 {
651 device_t dev;
652 int error, i, rid;
653
654 dev = sc->sc_dev;
655
656 for (i = 1; i <= sc->sc_hw.qhw_num_banks; i++) {
657 struct qat_bank *qb = &sc->sc_etr_banks[i - 1];
658
659 rid = i;
660 qb->qb_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
661 RF_ACTIVE);
662 if (qb->qb_ih == NULL) {
663 device_printf(dev,
664 "failed to allocate bank intr resource\n");
665 return ENXIO;
666 }
667 error = bus_setup_intr(dev, qb->qb_ih,
668 INTR_TYPE_NET | INTR_MPSAFE, NULL, qat_etr_bank_intr, qb,
669 &qb->qb_ih_cookie);
670 if (error != 0) {
671 device_printf(dev, "failed to set up bank intr\n");
672 return error;
673 }
674 error = bus_bind_intr(dev, qb->qb_ih, (i - 1) % mp_ncpus);
675 if (error != 0)
676 device_printf(dev, "failed to bind intr %d\n", i);
677 }
678
679 rid = i;
680 sc->sc_ih = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
681 RF_ACTIVE);
682 if (sc->sc_ih == NULL)
683 return ENXIO;
684 error = bus_setup_intr(dev, sc->sc_ih, INTR_TYPE_NET | INTR_MPSAFE,
685 NULL, qat_ae_cluster_intr, sc, &sc->sc_ih_cookie);
686
687 return error;
688 }
689
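/*
 * Allocate and initialize the transport ring banks and, if the hardware
 * provides them, the AP banks.
 */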
690 static void
691 qat_etr_init(struct qat_softc *sc)
692 {
693 int i;
694
695 sc->sc_etr_banks = qat_alloc_mem(
696 sizeof(struct qat_bank) * sc->sc_hw.qhw_num_banks);
697
698 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
699 qat_etr_bank_init(sc, i);
700
701 if (sc->sc_hw.qhw_num_ap_banks) {
702 sc->sc_etr_ap_banks = qat_alloc_mem(
703 sizeof(struct qat_ap_bank) * sc->sc_hw.qhw_num_ap_banks);
704 qat_etr_ap_bank_init(sc);
705 }
706 }
707
708 static void
709 qat_etr_deinit(struct qat_softc *sc)
710 {
711 int i;
712
713 if (sc->sc_etr_banks != NULL) {
714 for (i = 0; i < sc->sc_hw.qhw_num_banks; i++)
715 qat_etr_bank_deinit(sc, i);
716 qat_free_mem(sc->sc_etr_banks);
717 sc->sc_etr_banks = NULL;
718 }
719 if (sc->sc_etr_ap_banks != NULL) {
720 qat_free_mem(sc->sc_etr_ap_banks);
721 sc->sc_etr_ap_banks = NULL;
722 }
723 }
724
725 static void
726 qat_etr_bank_init(struct qat_softc *sc, int bank)
727 {
728 struct qat_bank *qb = &sc->sc_etr_banks[bank];
729 int i, tx_rx_gap = sc->sc_hw.qhw_tx_rx_gap;
730
731 MPASS(bank < sc->sc_hw.qhw_num_banks);
732
733 mtx_init(&qb->qb_bank_mtx, "qb bank", NULL, MTX_DEF);
734
735 qb->qb_sc = sc;
736 qb->qb_bank = bank;
737 qb->qb_coalescing_time = COALESCING_TIME_INTERVAL_DEFAULT;
738
/* Clear the CSRs for all rings within the bank */
740 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
741 struct qat_ring *qr = &qb->qb_et_rings[i];
742
743 qat_etr_bank_ring_write_4(sc, bank, i,
744 ETR_RING_CONFIG, 0);
745 qat_etr_bank_ring_base_write_8(sc, bank, i, 0);
746
747 if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
748 qr->qr_inflight = qat_alloc_mem(sizeof(uint32_t));
749 } else if (sc->sc_hw.qhw_tx_rings_mask &
750 (1 << (i - tx_rx_gap))) {
/* Share the inflight counter between the rx ring and its tx ring */
752 qr->qr_inflight =
753 qb->qb_et_rings[i - tx_rx_gap].qr_inflight;
754 }
755 }
756
757 if (sc->sc_hw.qhw_init_etr_intr != NULL) {
758 sc->sc_hw.qhw_init_etr_intr(sc, bank);
759 } else {
760 /* common code in qat 1.7 */
761 qat_etr_bank_write_4(sc, bank, ETR_INT_REG,
762 ETR_INT_REG_CLEAR_MASK);
763 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank /
764 ETR_RINGS_PER_INT_SRCSEL; i++) {
765 qat_etr_bank_write_4(sc, bank, ETR_INT_SRCSEL +
766 (i * ETR_INT_SRCSEL_NEXT_OFFSET),
767 ETR_INT_SRCSEL_MASK);
768 }
769 }
770 }
771
772 static void
773 qat_etr_bank_deinit(struct qat_softc *sc, int bank)
774 {
775 struct qat_bank *qb;
776 struct qat_ring *qr;
777 int i;
778
779 qb = &sc->sc_etr_banks[bank];
780 for (i = 0; i < sc->sc_hw.qhw_num_rings_per_bank; i++) {
781 if (sc->sc_hw.qhw_tx_rings_mask & (1 << i)) {
782 qr = &qb->qb_et_rings[i];
783 qat_free_mem(qr->qr_inflight);
784 }
785 }
786 }
787
788 static void
789 qat_etr_ap_bank_init(struct qat_softc *sc)
790 {
791 int ap_bank;
792
793 for (ap_bank = 0; ap_bank < sc->sc_hw.qhw_num_ap_banks; ap_bank++) {
794 struct qat_ap_bank *qab = &sc->sc_etr_ap_banks[ap_bank];
795
796 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_MASK,
797 ETR_AP_NF_MASK_INIT);
798 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST, 0);
799 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_MASK,
800 ETR_AP_NE_MASK_INIT);
801 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST, 0);
802
803 memset(qab, 0, sizeof(*qab));
804 }
805 }
806
807 static void
808 qat_etr_ap_bank_set_ring_mask(uint32_t *ap_mask, uint32_t ring, int set_mask)
809 {
810 if (set_mask)
811 *ap_mask |= (1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
812 else
813 *ap_mask &= ~(1 << ETR_RING_NUMBER_IN_AP_BANK(ring));
814 }
815
816 static void
817 qat_etr_ap_bank_set_ring_dest(struct qat_softc *sc, uint32_t *ap_dest,
818 uint32_t ring, int set_dest)
819 {
820 uint32_t ae_mask;
821 uint8_t mailbox, ae, nae;
822 uint8_t *dest = (uint8_t *)ap_dest;
823
824 mailbox = ETR_RING_AP_MAILBOX_NUMBER(ring);
825
826 nae = 0;
827 ae_mask = sc->sc_ae_mask;
828 for (ae = 0; ae < sc->sc_hw.qhw_num_engines; ae++) {
829 if ((ae_mask & (1 << ae)) == 0)
830 continue;
831
832 if (set_dest) {
833 dest[nae] = __SHIFTIN(ae, ETR_AP_DEST_AE) |
834 __SHIFTIN(mailbox, ETR_AP_DEST_MAILBOX) |
835 ETR_AP_DEST_ENABLE;
836 } else {
837 dest[nae] = 0;
838 }
839 nae++;
840 if (nae == ETR_MAX_AE_PER_MAILBOX)
841 break;
842 }
843 }
844
845 static void
846 qat_etr_ap_bank_setup_ring(struct qat_softc *sc, struct qat_ring *qr)
847 {
848 struct qat_ap_bank *qab;
849 int ap_bank;
850
851 if (sc->sc_hw.qhw_num_ap_banks == 0)
852 return;
853
854 ap_bank = ETR_RING_AP_BANK_NUMBER(qr->qr_ring);
855 MPASS(ap_bank < sc->sc_hw.qhw_num_ap_banks);
856 qab = &sc->sc_etr_ap_banks[ap_bank];
857
858 if (qr->qr_cb == NULL) {
859 qat_etr_ap_bank_set_ring_mask(&qab->qab_ne_mask, qr->qr_ring, 1);
860 if (!qab->qab_ne_dest) {
861 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_ne_dest,
862 qr->qr_ring, 1);
863 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NE_DEST,
864 qab->qab_ne_dest);
865 }
866 } else {
867 qat_etr_ap_bank_set_ring_mask(&qab->qab_nf_mask, qr->qr_ring, 1);
868 if (!qab->qab_nf_dest) {
869 qat_etr_ap_bank_set_ring_dest(sc, &qab->qab_nf_dest,
870 qr->qr_ring, 1);
871 qat_etr_ap_bank_write_4(sc, ap_bank, ETR_AP_NF_DEST,
872 qab->qab_nf_dest);
873 }
874 }
875 }
876
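/*
 * Return the encoded ring size whose capacity in bytes exactly matches
 * msg_size * num_msgs, or the default ring size if no exact match exists.
 */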
877 static int
878 qat_etr_verify_ring_size(uint32_t msg_size, uint32_t num_msgs)
879 {
880 int i = QAT_MIN_RING_SIZE;
881
882 for (; i <= QAT_MAX_RING_SIZE; i++)
883 if ((msg_size * num_msgs) == QAT_SIZE_TO_RING_SIZE_IN_BYTES(i))
884 return i;
885
886 return QAT_DEFAULT_RING_SIZE;
887 }
888
889 int
890 qat_etr_setup_ring(struct qat_softc *sc, int bank, uint32_t ring,
891 uint32_t num_msgs, uint32_t msg_size, qat_cb_t cb, void *cb_arg,
892 const char *name, struct qat_ring **rqr)
893 {
894 struct qat_bank *qb;
895 struct qat_ring *qr = NULL;
896 int error;
897 uint32_t ring_size_bytes, ring_config;
898 uint64_t ring_base;
899 uint32_t wm_nf = ETR_RING_CONFIG_NEAR_WM_512;
900 uint32_t wm_ne = ETR_RING_CONFIG_NEAR_WM_0;
901
902 MPASS(bank < sc->sc_hw.qhw_num_banks);
903
/* Allocate a ring from the specified bank */
905 qb = &sc->sc_etr_banks[bank];
906
907 if (ring >= sc->sc_hw.qhw_num_rings_per_bank)
908 return EINVAL;
909 if (qb->qb_allocated_rings & (1 << ring))
910 return ENOENT;
911 qr = &qb->qb_et_rings[ring];
912 qb->qb_allocated_rings |= 1 << ring;
913
914 /* Initialize allocated ring */
915 qr->qr_ring = ring;
916 qr->qr_bank = bank;
917 qr->qr_name = name;
918 qr->qr_ring_id = qr->qr_bank * sc->sc_hw.qhw_num_rings_per_bank + ring;
919 qr->qr_ring_mask = (1 << ring);
920 qr->qr_cb = cb;
921 qr->qr_cb_arg = cb_arg;
922
/* Set up the shadow variables */
924 qr->qr_head = 0;
925 qr->qr_tail = 0;
926 qr->qr_msg_size = QAT_BYTES_TO_MSG_SIZE(msg_size);
927 qr->qr_ring_size = qat_etr_verify_ring_size(msg_size, num_msgs);
928
/*
 * To make sure that the ring is aligned to the ring size, allocate
 * at least 4KB and then tell the user it is smaller.
 */
933 ring_size_bytes = QAT_SIZE_TO_RING_SIZE_IN_BYTES(qr->qr_ring_size);
934 ring_size_bytes = QAT_RING_SIZE_BYTES_MIN(ring_size_bytes);
935 error = qat_alloc_dmamem(sc, &qr->qr_dma, 1, ring_size_bytes,
936 ring_size_bytes);
937 if (error)
938 return error;
939
940 qr->qr_ring_vaddr = qr->qr_dma.qdm_dma_vaddr;
941 qr->qr_ring_paddr = qr->qr_dma.qdm_dma_seg.ds_addr;
942
943 memset(qr->qr_ring_vaddr, QAT_RING_PATTERN,
944 qr->qr_dma.qdm_dma_seg.ds_len);
945
946 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
947 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
948
949 if (cb == NULL) {
950 ring_config = ETR_RING_CONFIG_BUILD(qr->qr_ring_size);
951 } else {
952 ring_config =
953 ETR_RING_CONFIG_BUILD_RESP(qr->qr_ring_size, wm_nf, wm_ne);
954 }
955 qat_etr_bank_ring_write_4(sc, bank, ring, ETR_RING_CONFIG, ring_config);
956
957 ring_base = ETR_RING_BASE_BUILD(qr->qr_ring_paddr, qr->qr_ring_size);
958 qat_etr_bank_ring_base_write_8(sc, bank, ring, ring_base);
959
960 if (sc->sc_hw.qhw_init_arb != NULL)
961 qat_arb_update(sc, qb);
962
963 mtx_init(&qr->qr_ring_mtx, "qr ring", NULL, MTX_DEF);
964
965 qat_etr_ap_bank_setup_ring(sc, qr);
966
967 if (cb != NULL) {
968 uint32_t intr_mask;
969
970 qb->qb_intr_mask |= qr->qr_ring_mask;
971 intr_mask = qb->qb_intr_mask;
972
973 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_EN, intr_mask);
974 qat_etr_bank_write_4(sc, bank, ETR_INT_COL_CTL,
975 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
976 }
977
978 *rqr = qr;
979
980 return 0;
981 }
982
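/* Compute data % (1 << shift) without a division. */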
983 static inline u_int
984 qat_modulo(u_int data, u_int shift)
985 {
986 u_int div = data >> shift;
987 u_int mult = div << shift;
988 return data - mult;
989 }
990
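/*
 * Copy a request message onto a transmit ring and advance the ring's tail
 * pointer.  Returns ERESTART if the maximum number of in-flight messages
 * would be exceeded; the ring interrupt handler later unblocks the crypto
 * queue once responses drain.
 */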
991 int
992 qat_etr_put_msg(struct qat_softc *sc, struct qat_ring *qr, uint32_t *msg)
993 {
994 uint32_t inflight;
995 uint32_t *addr;
996
997 mtx_lock(&qr->qr_ring_mtx);
998
999 inflight = atomic_fetchadd_32(qr->qr_inflight, 1) + 1;
1000 if (inflight > QAT_MAX_INFLIGHTS(qr->qr_ring_size, qr->qr_msg_size)) {
1001 atomic_subtract_32(qr->qr_inflight, 1);
1002 qr->qr_need_wakeup = true;
1003 mtx_unlock(&qr->qr_ring_mtx);
1004 counter_u64_add(sc->sc_ring_full_restarts, 1);
1005 return ERESTART;
1006 }
1007
1008 addr = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_tail);
1009
1010 memcpy(addr, msg, QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size));
1011
1012 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
1013 BUS_DMASYNC_PREWRITE);
1014
1015 qr->qr_tail = qat_modulo(qr->qr_tail +
1016 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1017 QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1018
1019 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1020 ETR_RING_TAIL_OFFSET, qr->qr_tail);
1021
1022 mtx_unlock(&qr->qr_ring_mtx);
1023
1024 return 0;
1025 }
1026
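/*
 * Drain completed messages from a response ring: invoke the ring callback
 * for each message, mark the slot empty, and advance the head pointer.
 * If a request was previously deferred because the ring was full, unblock
 * the crypto queue.
 */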
1027 static int
1028 qat_etr_ring_intr(struct qat_softc *sc, struct qat_bank *qb,
1029 struct qat_ring *qr)
1030 {
1031 uint32_t *msg, nmsg = 0;
1032 int handled = 0;
1033 bool blocked = false;
1034
1035 mtx_lock(&qr->qr_ring_mtx);
1036
1037 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1038
1039 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
1040 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1041
1042 while (atomic_load_32(msg) != ETR_RING_EMPTY_ENTRY_SIG) {
1043 atomic_subtract_32(qr->qr_inflight, 1);
1044
1045 if (qr->qr_cb != NULL) {
1046 mtx_unlock(&qr->qr_ring_mtx);
1047 handled |= qr->qr_cb(sc, qr->qr_cb_arg, msg);
1048 mtx_lock(&qr->qr_ring_mtx);
1049 }
1050
1051 atomic_store_32(msg, ETR_RING_EMPTY_ENTRY_SIG);
1052
1053 qr->qr_head = qat_modulo(qr->qr_head +
1054 QAT_MSG_SIZE_TO_BYTES(qr->qr_msg_size),
1055 QAT_RING_SIZE_MODULO(qr->qr_ring_size));
1056 nmsg++;
1057
1058 msg = (uint32_t *)((uintptr_t)qr->qr_ring_vaddr + qr->qr_head);
1059 }
1060
1061 bus_dmamap_sync(qr->qr_dma.qdm_dma_tag, qr->qr_dma.qdm_dma_map,
1062 BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
1063
1064 if (nmsg > 0) {
1065 qat_etr_bank_ring_write_4(sc, qr->qr_bank, qr->qr_ring,
1066 ETR_RING_HEAD_OFFSET, qr->qr_head);
1067 if (qr->qr_need_wakeup) {
1068 blocked = true;
1069 qr->qr_need_wakeup = false;
1070 }
1071 }
1072
1073 mtx_unlock(&qr->qr_ring_mtx);
1074
1075 if (blocked)
1076 crypto_unblock(sc->sc_crypto.qcy_cid, CRYPTO_SYMQ);
1077
1078 return handled;
1079 }
1080
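/*
 * Per-bank interrupt handler.  Interrupt coalescing is briefly disabled
 * while the (inverted) empty-status register is sampled to find response
 * rings with pending messages, each of which is then serviced.
 */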
1081 static void
1082 qat_etr_bank_intr(void *arg)
1083 {
1084 struct qat_bank *qb = arg;
1085 struct qat_softc *sc = qb->qb_sc;
1086 uint32_t estat;
1087 int i;
1088
1089 mtx_lock(&qb->qb_bank_mtx);
1090
1091 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL, 0);
1092
1093 /* Now handle all the responses */
1094 estat = ~qat_etr_bank_read_4(sc, qb->qb_bank, ETR_E_STAT);
1095 estat &= qb->qb_intr_mask;
1096
1097 qat_etr_bank_write_4(sc, qb->qb_bank, ETR_INT_COL_CTL,
1098 ETR_INT_COL_CTL_ENABLE | qb->qb_coalescing_time);
1099
1100 mtx_unlock(&qb->qb_bank_mtx);
1101
1102 while ((i = ffs(estat)) != 0) {
1103 struct qat_ring *qr = &qb->qb_et_rings[--i];
1104 estat &= ~(1 << i);
1105 (void)qat_etr_ring_intr(sc, qb, qr);
1106 }
1107 }
1108
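/*
 * Update the ring arbiter's service-enable mask to reflect the rings
 * currently allocated in the bank.
 */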
1109 void
1110 qat_arb_update(struct qat_softc *sc, struct qat_bank *qb)
1111 {
1112
1113 qat_arb_ringsrvarben_write_4(sc, qb->qb_bank,
1114 qb->qb_allocated_rings & 0xff);
1115 }
1116
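/*
 * Take a pre-allocated symmetric crypto request cookie from the bank's
 * free list, or return NULL if the list is empty.
 */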
1117 static struct qat_sym_cookie *
1118 qat_crypto_alloc_sym_cookie(struct qat_crypto_bank *qcb)
1119 {
1120 struct qat_sym_cookie *qsc;
1121
1122 mtx_lock(&qcb->qcb_bank_mtx);
1123
1124 if (qcb->qcb_symck_free_count == 0) {
1125 mtx_unlock(&qcb->qcb_bank_mtx);
1126 return NULL;
1127 }
1128
1129 qsc = qcb->qcb_symck_free[--qcb->qcb_symck_free_count];
1130
1131 mtx_unlock(&qcb->qcb_bank_mtx);
1132
1133 return qsc;
1134 }
1135
1136 static void
1137 qat_crypto_free_sym_cookie(struct qat_crypto_bank *qcb,
1138 struct qat_sym_cookie *qsc)
1139 {
1140 explicit_bzero(qsc->qsc_iv_buf, EALG_MAX_BLOCK_LEN);
1141 explicit_bzero(qsc->qsc_auth_res, QAT_SYM_HASH_BUFFER_LEN);
1142
1143 mtx_lock(&qcb->qcb_bank_mtx);
1144 qcb->qcb_symck_free[qcb->qcb_symck_free_count++] = qsc;
1145 mtx_unlock(&qcb->qcb_bank_mtx);
1146 }
1147
1148 void
1149 qat_memcpy_htobe64(void *dst, const void *src, size_t len)
1150 {
1151 uint64_t *dst0 = dst;
1152 const uint64_t *src0 = src;
1153 size_t i;
1154
1155 MPASS(len % sizeof(*dst0) == 0);
1156
1157 for (i = 0; i < len / sizeof(*dst0); i++)
1158 *(dst0 + i) = htobe64(*(src0 + i));
1159 }
1160
1161 void
1162 qat_memcpy_htobe32(void *dst, const void *src, size_t len)
1163 {
1164 uint32_t *dst0 = dst;
1165 const uint32_t *src0 = src;
1166 size_t i;
1167
1168 MPASS(len % sizeof(*dst0) == 0);
1169
1170 for (i = 0; i < len / sizeof(*dst0); i++)
1171 *(dst0 + i) = htobe32(*(src0 + i));
1172 }
1173
1174 void
1175 qat_memcpy_htobe(void *dst, const void *src, size_t len, uint32_t wordbyte)
1176 {
1177 switch (wordbyte) {
1178 case 4:
1179 qat_memcpy_htobe32(dst, src, len);
1180 break;
1181 case 8:
1182 qat_memcpy_htobe64(dst, src, len);
1183 break;
1184 default:
1185 panic("invalid word size %u", wordbyte);
1186 }
1187 }
1188
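/*
 * Derive the GHASH hash key H = E_K(0^128) used by GCM/GMAC by encrypting
 * an all-zeroes block with the session's AES key.
 */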
1189 void
1190 qat_crypto_gmac_precompute(const struct qat_crypto_desc *desc,
1191 const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
1192 uint8_t *state)
1193 {
1194 uint32_t ks[4 * (RIJNDAEL_MAXNR + 1)];
1195 char zeros[AES_BLOCK_LEN];
1196 int rounds;
1197
1198 memset(zeros, 0, sizeof(zeros));
1199 rounds = rijndaelKeySetupEnc(ks, key, klen * NBBY);
1200 rijndaelEncrypt(ks, rounds, zeros, state);
1201 explicit_bzero(ks, sizeof(ks));
1202 }
1203
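/*
 * Precompute the HMAC inner (ipad) and outer (opad) digest states for the
 * session key and store them in the big-endian layout expected by the
 * hardware.
 */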
1204 void
1205 qat_crypto_hmac_precompute(const struct qat_crypto_desc *desc,
1206 const uint8_t *key, int klen, const struct qat_sym_hash_def *hash_def,
1207 uint8_t *state1, uint8_t *state2)
1208 {
1209 union authctx ctx;
1210 const struct auth_hash *sah = hash_def->qshd_alg->qshai_sah;
1211 uint32_t state_offset = hash_def->qshd_alg->qshai_state_offset;
1212 uint32_t state_size = hash_def->qshd_alg->qshai_state_size;
1213 uint32_t state_word = hash_def->qshd_alg->qshai_state_word;
1214
1215 hmac_init_ipad(sah, key, klen, &ctx);
1216 qat_memcpy_htobe(state1, (uint8_t *)&ctx + state_offset, state_size,
1217 state_word);
1218 hmac_init_opad(sah, key, klen, &ctx);
1219 qat_memcpy_htobe(state2, (uint8_t *)&ctx + state_offset, state_size,
1220 state_word);
1221 explicit_bzero(&ctx, sizeof(ctx));
1222 }
1223
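/* Map an AES key length to the corresponding hardware cipher algorithm. */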
1224 static enum hw_cipher_algo
1225 qat_aes_cipher_algo(int klen)
1226 {
1227 switch (klen) {
1228 case HW_AES_128_KEY_SZ:
1229 return HW_CIPHER_ALGO_AES128;
1230 case HW_AES_192_KEY_SZ:
1231 return HW_CIPHER_ALGO_AES192;
1232 case HW_AES_256_KEY_SZ:
1233 return HW_CIPHER_ALGO_AES256;
1234 default:
1235 panic("invalid key length %d", klen);
1236 }
1237 }
1238
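/*
 * Build the hardware cipher configuration word for a request descriptor:
 * the algorithm and mode come from the session, the direction from the
 * descriptor.  CTR mode always runs the cipher in the encrypt direction,
 * and CBC/XTS decryption requests on-the-fly key conversion.
 */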
1239 uint16_t
1240 qat_crypto_load_cipher_session(const struct qat_crypto_desc *desc,
1241 const struct qat_session *qs)
1242 {
1243 enum hw_cipher_algo algo;
1244 enum hw_cipher_dir dir;
1245 enum hw_cipher_convert key_convert;
1246 enum hw_cipher_mode mode;
1247
1248 dir = desc->qcd_cipher_dir;
1249 key_convert = HW_CIPHER_NO_CONVERT;
1250 mode = qs->qs_cipher_mode;
1251 switch (mode) {
1252 case HW_CIPHER_CBC_MODE:
1253 case HW_CIPHER_XTS_MODE:
1254 algo = qs->qs_cipher_algo;
1255
/*
 * The AES decryption key needs to be reversed.
 * Rather than reversing the key at session registration,
 * it is reversed on the fly by setting the KEY_CONVERT
 * bit here.
 */
1262 if (desc->qcd_cipher_dir == HW_CIPHER_DECRYPT)
1263 key_convert = HW_CIPHER_KEY_CONVERT;
1264 break;
1265 case HW_CIPHER_CTR_MODE:
1266 algo = qs->qs_cipher_algo;
1267 dir = HW_CIPHER_ENCRYPT;
1268 break;
1269 default:
1270 panic("unhandled cipher mode %d", mode);
1271 break;
1272 }
1273
1274 return HW_CIPHER_CONFIG_BUILD(mode, algo, key_convert, dir);
1275 }
1276
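/*
 * Build the hardware authentication configuration word for a session and
 * return the matching hash definition through "hash_def".
 */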
1277 uint16_t
1278 qat_crypto_load_auth_session(const struct qat_crypto_desc *desc,
1279 const struct qat_session *qs, const struct qat_sym_hash_def **hash_def)
1280 {
1281 enum qat_sym_hash_algorithm algo;
1282
1283 switch (qs->qs_auth_algo) {
1284 case HW_AUTH_ALGO_SHA1:
1285 algo = QAT_SYM_HASH_SHA1;
1286 break;
1287 case HW_AUTH_ALGO_SHA256:
1288 algo = QAT_SYM_HASH_SHA256;
1289 break;
1290 case HW_AUTH_ALGO_SHA384:
1291 algo = QAT_SYM_HASH_SHA384;
1292 break;
1293 case HW_AUTH_ALGO_SHA512:
1294 algo = QAT_SYM_HASH_SHA512;
1295 break;
1296 case HW_AUTH_ALGO_GALOIS_128:
1297 algo = QAT_SYM_HASH_AES_GCM;
1298 break;
1299 default:
1300 panic("unhandled auth algorithm %d", qs->qs_auth_algo);
1301 break;
1302 }
1303 *hash_def = &qat_sym_hash_defs[algo];
1304
1305 return HW_AUTH_CONFIG_BUILD(qs->qs_auth_mode,
1306 (*hash_def)->qshd_qat->qshqi_algo_enc,
1307 (*hash_def)->qshd_alg->qshai_digest_len);
1308 }
1309
1310 struct qat_crypto_load_cb_arg {
1311 struct qat_session *qs;
1312 struct qat_sym_cookie *qsc;
1313 struct cryptop *crp;
1314 int error;
1315 };
1316
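/*
 * Convert a bus_dma segment array into the firmware's flat buffer list
 * format, skipping the first "skip" bytes and appending entries starting
 * at index "noseg".  Returns E2BIG if more than QAT_MAXSEG entries would
 * be needed.
 */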
1317 static int
1318 qat_crypto_populate_buf_list(struct buffer_list_desc *buffers,
1319 bus_dma_segment_t *segs, int niseg, int noseg, int skip)
1320 {
1321 struct flat_buffer_desc *flatbuf;
1322 bus_addr_t addr;
1323 bus_size_t len;
1324 int iseg, oseg;
1325
1326 for (iseg = 0, oseg = noseg; iseg < niseg && oseg < QAT_MAXSEG;
1327 iseg++) {
1328 addr = segs[iseg].ds_addr;
1329 len = segs[iseg].ds_len;
1330
1331 if (skip > 0) {
1332 if (skip < len) {
1333 addr += skip;
1334 len -= skip;
1335 skip = 0;
1336 } else {
1337 skip -= len;
1338 continue;
1339 }
1340 }
1341
1342 flatbuf = &buffers->flat_bufs[oseg++];
1343 flatbuf->data_len_in_bytes = (uint32_t)len;
1344 flatbuf->phy_buffer = (uint64_t)addr;
1345 }
1346 buffers->num_buffers = oseg;
1347 return iseg < niseg ? E2BIG : 0;
1348 }
1349
1350 static void
1351 qat_crypto_load_aadbuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
1352 int error)
1353 {
1354 struct qat_crypto_load_cb_arg *arg;
1355 struct qat_sym_cookie *qsc;
1356
1357 arg = _arg;
1358 if (error != 0) {
1359 arg->error = error;
1360 return;
1361 }
1362
1363 qsc = arg->qsc;
1364 arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs,
1365 nseg, 0, 0);
1366 }
1367
1368 static void
1369 qat_crypto_load_buf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
1370 int error)
1371 {
1372 struct cryptop *crp;
1373 struct qat_crypto_load_cb_arg *arg;
1374 struct qat_session *qs;
1375 struct qat_sym_cookie *qsc;
1376 int noseg, skip;
1377
1378 arg = _arg;
1379 if (error != 0) {
1380 arg->error = error;
1381 return;
1382 }
1383
1384 crp = arg->crp;
1385 qs = arg->qs;
1386 qsc = arg->qsc;
1387
1388 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
1389 /* AAD was handled in qat_crypto_load(). */
1390 skip = crp->crp_payload_start;
1391 noseg = 0;
1392 } else if (crp->crp_aad == NULL && crp->crp_aad_length > 0) {
1393 skip = crp->crp_aad_start;
1394 noseg = 0;
1395 } else {
1396 skip = crp->crp_payload_start;
1397 noseg = crp->crp_aad == NULL ?
1398 0 : qsc->qsc_buf_list.num_buffers;
1399 }
1400 arg->error = qat_crypto_populate_buf_list(&qsc->qsc_buf_list, segs,
1401 nseg, noseg, skip);
1402 }
1403
1404 static void
1405 qat_crypto_load_obuf_cb(void *_arg, bus_dma_segment_t *segs, int nseg,
1406 int error)
1407 {
1408 struct buffer_list_desc *ibufs, *obufs;
1409 struct flat_buffer_desc *ibuf, *obuf;
1410 struct cryptop *crp;
1411 struct qat_crypto_load_cb_arg *arg;
1412 struct qat_session *qs;
1413 struct qat_sym_cookie *qsc;
1414 int buflen, osegs, tocopy;
1415
1416 arg = _arg;
1417 if (error != 0) {
1418 arg->error = error;
1419 return;
1420 }
1421
1422 crp = arg->crp;
1423 qs = arg->qs;
1424 qsc = arg->qsc;
1425
1426 /*
1427 * The payload must start at the same offset in the output SG list as in
1428 * the input SG list. Copy over SG entries from the input corresponding
1429 * to the AAD buffer.
1430 */
1431 osegs = 0;
1432 if (qs->qs_auth_algo != HW_AUTH_ALGO_GALOIS_128 &&
1433 crp->crp_aad_length > 0) {
1434 tocopy = crp->crp_aad == NULL ?
1435 crp->crp_payload_start - crp->crp_aad_start :
1436 crp->crp_aad_length;
1437
1438 ibufs = &qsc->qsc_buf_list;
1439 obufs = &qsc->qsc_obuf_list;
1440 for (; osegs < ibufs->num_buffers && tocopy > 0; osegs++) {
1441 ibuf = &ibufs->flat_bufs[osegs];
1442 obuf = &obufs->flat_bufs[osegs];
1443
1444 obuf->phy_buffer = ibuf->phy_buffer;
1445 buflen = imin(ibuf->data_len_in_bytes, tocopy);
1446 obuf->data_len_in_bytes = buflen;
1447 tocopy -= buflen;
1448 }
1449 }
1450
1451 arg->error = qat_crypto_populate_buf_list(&qsc->qsc_obuf_list, segs,
1452 nseg, osegs, crp->crp_payload_output_start);
1453 }
1454
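/*
 * Prepare the DMA state for a request: copy out the IV, bounce GCM AAD
 * into the cookie's contiguous buffer (or DMA-map separate AAD otherwise),
 * and map the input buffer and, if present, the separate output buffer.
 */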
1455 static int
1456 qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc,
1457 struct qat_crypto_desc const *desc, struct cryptop *crp)
1458 {
1459 struct qat_crypto_load_cb_arg arg;
1460 int error;
1461
1462 crypto_read_iv(crp, qsc->qsc_iv_buf);
1463
1464 arg.crp = crp;
1465 arg.qs = qs;
1466 arg.qsc = qsc;
1467 arg.error = 0;
1468
1469 error = 0;
1470 if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128 &&
1471 crp->crp_aad_length > 0) {
1472 /*
1473 * The firmware expects AAD to be in a contiguous buffer and
1474 * padded to a multiple of 16 bytes. To satisfy these
1475 * constraints we bounce the AAD into a per-request buffer.
1476 * There is a small limit on the AAD size so this is not too
1477 * onerous.
1478 */
1479 memset(qsc->qsc_gcm_aad, 0, QAT_GCM_AAD_SIZE_MAX);
1480 if (crp->crp_aad == NULL) {
1481 crypto_copydata(crp, crp->crp_aad_start,
1482 crp->crp_aad_length, qsc->qsc_gcm_aad);
1483 } else {
1484 memcpy(qsc->qsc_gcm_aad, crp->crp_aad,
1485 crp->crp_aad_length);
1486 }
1487 } else if (crp->crp_aad != NULL) {
1488 error = bus_dmamap_load(
1489 qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag,
1490 qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap,
1491 crp->crp_aad, crp->crp_aad_length,
1492 qat_crypto_load_aadbuf_cb, &arg, BUS_DMA_NOWAIT);
1493 if (error == 0)
1494 error = arg.error;
1495 }
1496 if (error == 0) {
1497 error = bus_dmamap_load_crp_buffer(
1498 qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag,
1499 qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap,
1500 &crp->crp_buf, qat_crypto_load_buf_cb, &arg,
1501 BUS_DMA_NOWAIT);
1502 if (error == 0)
1503 error = arg.error;
1504 }
1505 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
1506 error = bus_dmamap_load_crp_buffer(
1507 qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag,
1508 qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap,
1509 &crp->crp_obuf, qat_crypto_load_obuf_cb, &arg,
1510 BUS_DMA_NOWAIT);
1511 if (error == 0)
1512 error = arg.error;
1513 }
1514 return error;
1515 }
1516
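/*
 * Spread requests across crypto banks by selecting a bank based on the
 * current CPU.
 */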
1517 static inline struct qat_crypto_bank *
1518 qat_crypto_select_bank(struct qat_crypto *qcy)
1519 {
1520 u_int cpuid = PCPU_GET(cpuid);
1521
1522 return &qcy->qcy_banks[cpuid % qcy->qcy_num_banks];
1523 }
1524
1525 static int
1526 qat_crypto_setup_ring(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1527 {
1528 char *name;
1529 int bank, curname, error, i, j;
1530
1531 bank = qcb->qcb_bank;
1532 curname = 0;
1533
1534 name = qcb->qcb_ring_names[curname++];
1535 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_tx", bank);
1536 error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1537 sc->sc_hw.qhw_ring_sym_tx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_req_size,
1538 NULL, NULL, name, &qcb->qcb_sym_tx);
1539 if (error)
1540 return error;
1541
1542 name = qcb->qcb_ring_names[curname++];
1543 snprintf(name, QAT_RING_NAME_SIZE, "bank%d sym_rx", bank);
1544 error = qat_etr_setup_ring(sc, qcb->qcb_bank,
1545 sc->sc_hw.qhw_ring_sym_rx, QAT_NSYMREQ, sc->sc_hw.qhw_fw_resp_size,
1546 qat_crypto_sym_rxintr, qcb, name, &qcb->qcb_sym_rx);
1547 if (error)
1548 return error;
1549
1550 for (i = 0; i < QAT_NSYMCOOKIE; i++) {
1551 struct qat_dmamem *qdm = &qcb->qcb_symck_dmamems[i];
1552 struct qat_sym_cookie *qsc;
1553
1554 error = qat_alloc_dmamem(sc, qdm, 1,
1555 sizeof(struct qat_sym_cookie), QAT_OPTIMAL_ALIGN);
1556 if (error)
1557 return error;
1558
1559 qsc = qdm->qdm_dma_vaddr;
1560 qsc->qsc_self_dmamap = qdm->qdm_dma_map;
1561 qsc->qsc_self_dma_tag = qdm->qdm_dma_tag;
1562 qsc->qsc_bulk_req_params_buf_paddr =
1563 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1564 qsc_bulk_cookie.qsbc_req_params_buf);
1565 qsc->qsc_buffer_list_desc_paddr =
1566 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1567 qsc_buf_list);
1568 qsc->qsc_obuffer_list_desc_paddr =
1569 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1570 qsc_obuf_list);
1574 qsc->qsc_iv_buf_paddr =
1575 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1576 qsc_iv_buf);
1577 qsc->qsc_auth_res_paddr =
1578 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1579 qsc_auth_res);
1580 qsc->qsc_gcm_aad_paddr =
1581 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1582 qsc_gcm_aad);
1583 qsc->qsc_content_desc_paddr =
1584 qdm->qdm_dma_seg.ds_addr + offsetof(struct qat_sym_cookie,
1585 qsc_content_desc);
1586 qcb->qcb_symck_free[i] = qsc;
1587 qcb->qcb_symck_free_count++;
1588
1589 for (j = 0; j < QAT_SYM_DMA_COUNT; j++) {
1590 error = bus_dma_tag_create(bus_get_dma_tag(sc->sc_dev),
1591 1, 0, /* alignment, boundary */
1592 BUS_SPACE_MAXADDR, /* lowaddr */
1593 BUS_SPACE_MAXADDR, /* highaddr */
1594 NULL, NULL, /* filter, filterarg */
1595 QAT_MAXLEN, /* maxsize */
1596 QAT_MAXSEG, /* nsegments */
1597 QAT_MAXLEN, /* maxsegsize */
1598 BUS_DMA_COHERENT, /* flags */
1599 NULL, NULL, /* lockfunc, lockarg */
1600 &qsc->qsc_dma[j].qsd_dma_tag);
1601 if (error != 0)
1602 return error;
1603 error = bus_dmamap_create(qsc->qsc_dma[j].qsd_dma_tag,
1604 BUS_DMA_COHERENT, &qsc->qsc_dma[j].qsd_dmamap);
1605 if (error != 0)
1606 return error;
1607 }
1608 }
1609
1610 return 0;
1611 }
1612
1613 static int
1614 qat_crypto_bank_init(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1615 {
1616 mtx_init(&qcb->qcb_bank_mtx, "qcb bank", NULL, MTX_DEF);
1617
1618 return qat_crypto_setup_ring(sc, qcb);
1619 }
1620
1621 static void
1622 qat_crypto_bank_deinit(struct qat_softc *sc, struct qat_crypto_bank *qcb)
1623 {
1624 struct qat_dmamem *qdm;
1625 struct qat_sym_cookie *qsc;
1626 int i, j;
1627
1628 for (i = 0; i < QAT_NSYMCOOKIE; i++) {
1629 qdm = &qcb->qcb_symck_dmamems[i];
1630 qsc = qcb->qcb_symck_free[i];
1631 for (j = 0; j < QAT_SYM_DMA_COUNT; j++) {
1632 bus_dmamap_destroy(qsc->qsc_dma[j].qsd_dma_tag,
1633 qsc->qsc_dma[j].qsd_dmamap);
1634 bus_dma_tag_destroy(qsc->qsc_dma[j].qsd_dma_tag);
1635 }
1636 qat_free_dmamem(sc, qdm);
1637 }
1638 qat_free_dmamem(sc, &qcb->qcb_sym_tx->qr_dma);
1639 qat_free_dmamem(sc, &qcb->qcb_sym_rx->qr_dma);
1640
1641 mtx_destroy(&qcb->qcb_bank_mtx);
1642 }
1643
1644 static int
1645 qat_crypto_init(struct qat_softc *sc)
1646 {
1647 struct qat_crypto *qcy = &sc->sc_crypto;
1648 struct sysctl_ctx_list *ctx;
1649 struct sysctl_oid *oid;
1650 struct sysctl_oid_list *children;
1651 int bank, error, num_banks;
1652
1653 qcy->qcy_sc = sc;
1654
1655 if (sc->sc_hw.qhw_init_arb != NULL)
1656 num_banks = imin(mp_ncpus, sc->sc_hw.qhw_num_banks);
1657 else
1658 num_banks = sc->sc_ae_num;
1659
1660 qcy->qcy_num_banks = num_banks;
1661
1662 qcy->qcy_banks =
1663 qat_alloc_mem(sizeof(struct qat_crypto_bank) * num_banks);
1664
1665 for (bank = 0; bank < num_banks; bank++) {
1666 struct qat_crypto_bank *qcb = &qcy->qcy_banks[bank];
1667 qcb->qcb_bank = bank;
1668 error = qat_crypto_bank_init(sc, qcb);
1669 if (error)
1670 return error;
1671 }
1672
1673 mtx_init(&qcy->qcy_crypto_mtx, "qcy crypto", NULL, MTX_DEF);
1674
1675 ctx = device_get_sysctl_ctx(sc->sc_dev);
1676 oid = device_get_sysctl_tree(sc->sc_dev);
1677 children = SYSCTL_CHILDREN(oid);
1678 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
1679 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
1680 children = SYSCTL_CHILDREN(oid);
1681
1682 sc->sc_gcm_aad_restarts = counter_u64_alloc(M_WAITOK);
1683 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_restarts",
1684 CTLFLAG_RD, &sc->sc_gcm_aad_restarts,
1685 "GCM requests deferred due to AAD size change");
1686 sc->sc_gcm_aad_updates = counter_u64_alloc(M_WAITOK);
1687 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_aad_updates",
1688 CTLFLAG_RD, &sc->sc_gcm_aad_updates,
1689 "GCM requests that required session state update");
1690 sc->sc_ring_full_restarts = counter_u64_alloc(M_WAITOK);
1691 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ring_full",
1692 CTLFLAG_RD, &sc->sc_ring_full_restarts,
1693 "Requests deferred due to in-flight max reached");
1694 sc->sc_sym_alloc_failures = counter_u64_alloc(M_WAITOK);
1695 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sym_alloc_failures",
1696 CTLFLAG_RD, &sc->sc_sym_alloc_failures,
1697 "Request allocation failures");
1698
1699 return 0;
1700 }
1701
1702 static void
1703 qat_crypto_deinit(struct qat_softc *sc)
1704 {
1705 struct qat_crypto *qcy = &sc->sc_crypto;
1706 struct qat_crypto_bank *qcb;
1707 int bank;
1708
1709 counter_u64_free(sc->sc_sym_alloc_failures);
1710 counter_u64_free(sc->sc_ring_full_restarts);
1711 counter_u64_free(sc->sc_gcm_aad_updates);
1712 counter_u64_free(sc->sc_gcm_aad_restarts);
1713
1714 if (qcy->qcy_banks != NULL) {
1715 for (bank = 0; bank < qcy->qcy_num_banks; bank++) {
1716 qcb = &qcy->qcy_banks[bank];
1717 qat_crypto_bank_deinit(sc, qcb);
1718 }
1719 qat_free_mem(qcy->qcy_banks);
1720 mtx_destroy(&qcy->qcy_crypto_mtx);
1721 }
1722 }
1723
1724 static int
1725 qat_crypto_start(struct qat_softc *sc)
1726 {
1727 struct qat_crypto *qcy;
1728
1729 qcy = &sc->sc_crypto;
1730 qcy->qcy_cid = crypto_get_driverid(sc->sc_dev,
1731 sizeof(struct qat_session), CRYPTOCAP_F_HARDWARE);
1732 if (qcy->qcy_cid < 0) {
1733 device_printf(sc->sc_dev,
1734 "could not get opencrypto driver id\n");
1735 return ENOENT;
1736 }
1737
1738 return 0;
1739 }
1740
1741 static void
1742 qat_crypto_stop(struct qat_softc *sc)
1743 {
1744 struct qat_crypto *qcy;
1745
1746 qcy = &sc->sc_crypto;
1747 if (qcy->qcy_cid >= 0)
1748 (void)crypto_unregister_all(qcy->qcy_cid);
1749 }
1750
1751 static void
1752 qat_crypto_sym_dma_unload(struct qat_sym_cookie *qsc, enum qat_sym_dma i)
1753 {
1754 bus_dmamap_sync(qsc->qsc_dma[i].qsd_dma_tag, qsc->qsc_dma[i].qsd_dmamap,
1755 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1756 bus_dmamap_unload(qsc->qsc_dma[i].qsd_dma_tag,
1757 qsc->qsc_dma[i].qsd_dmamap);
1758 }
1759
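/*
 * Handle a symmetric crypto response: tear down the request's DMA
 * mappings, verify or copy back the digest, return the cookie to its
 * bank, complete the opencrypto request, and unblock the session if it
 * was stalled waiting for in-flight requests to drain.
 */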
1760 static int
1761 qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg, void *msg)
1762 {
1763 char icv[QAT_SYM_HASH_BUFFER_LEN];
1764 struct qat_crypto_bank *qcb = arg;
1765 struct qat_crypto *qcy;
1766 struct qat_session *qs;
1767 struct qat_sym_cookie *qsc;
1768 struct qat_sym_bulk_cookie *qsbc;
1769 struct cryptop *crp;
1770 int error;
1771 uint16_t auth_sz;
1772 bool blocked;
1773
1774 qsc = *(void **)((uintptr_t)msg + sc->sc_hw.qhw_crypto_opaque_offset);
1775
1776 qsbc = &qsc->qsc_bulk_cookie;
1777 qcy = qsbc->qsbc_crypto;
1778 qs = qsbc->qsbc_session;
1779 crp = qsbc->qsbc_cb_tag;
1780
1781 bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
1782 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
1783
1784 if (crp->crp_aad != NULL)
1785 qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_AADBUF);
1786 qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_BUF);
1787 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1788 qat_crypto_sym_dma_unload(qsc, QAT_SYM_DMA_OBUF);
1789
1790 error = 0;
1791 if ((auth_sz = qs->qs_auth_mlen) != 0) {
1792 if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
1793 crypto_copydata(crp, crp->crp_digest_start,
1794 auth_sz, icv);
1795 if (timingsafe_bcmp(icv, qsc->qsc_auth_res,
1796 auth_sz) != 0) {
1797 error = EBADMSG;
1798 }
1799 } else {
1800 crypto_copyback(crp, crp->crp_digest_start,
1801 auth_sz, qsc->qsc_auth_res);
1802 }
1803 }
1804
1805 qat_crypto_free_sym_cookie(qcb, qsc);
1806
1807 blocked = false;
1808 mtx_lock(&qs->qs_session_mtx);
1809 MPASS(qs->qs_status & QAT_SESSION_STATUS_ACTIVE);
1810 qs->qs_inflight--;
1811 if (__predict_false(qs->qs_need_wakeup && qs->qs_inflight == 0)) {
1812 blocked = true;
1813 qs->qs_need_wakeup = false;
1814 }
1815 mtx_unlock(&qs->qs_session_mtx);
1816
1817 crp->crp_etype = error;
1818 crypto_done(crp);
1819
1820 if (blocked)
1821 crypto_unblock(qcy->qcy_cid, CRYPTO_SYMQ);
1822
1823 return 1;
1824 }
1825
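/*
 * Check whether the requested session parameters are supported by this
 * device, returning CRYPTODEV_PROBE_HARDWARE if the session can be
 * handled.
 */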
1826 static int
1827 qat_probesession(device_t dev, const struct crypto_session_params *csp)
1828 {
1829 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
1830 0)
1831 return EINVAL;
1832
1833 if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
1834 qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) {
1835 /*
1836 * AES-XTS is not supported by the NanoQAT.
1837 */
1838 return EINVAL;
1839 }
1840
1841 switch (csp->csp_mode) {
1842 case CSP_MODE_CIPHER:
1843 switch (csp->csp_cipher_alg) {
1844 case CRYPTO_AES_CBC:
1845 case CRYPTO_AES_ICM:
1846 if (csp->csp_ivlen != AES_BLOCK_LEN)
1847 return EINVAL;
1848 break;
1849 case CRYPTO_AES_XTS:
1850 if (csp->csp_ivlen != AES_XTS_IV_LEN)
1851 return EINVAL;
1852 break;
1853 default:
1854 return EINVAL;
1855 }
1856 break;
1857 case CSP_MODE_DIGEST:
1858 switch (csp->csp_auth_alg) {
1859 case CRYPTO_SHA1:
1860 case CRYPTO_SHA1_HMAC:
1861 case CRYPTO_SHA2_256:
1862 case CRYPTO_SHA2_256_HMAC:
1863 case CRYPTO_SHA2_384:
1864 case CRYPTO_SHA2_384_HMAC:
1865 case CRYPTO_SHA2_512:
1866 case CRYPTO_SHA2_512_HMAC:
1867 break;
1868 case CRYPTO_AES_NIST_GMAC:
1869 if (csp->csp_ivlen != AES_GCM_IV_LEN)
1870 return EINVAL;
1871 break;
1872 default:
1873 return EINVAL;
1874 }
1875 break;
1876 case CSP_MODE_AEAD:
1877 switch (csp->csp_cipher_alg) {
1878 case CRYPTO_AES_NIST_GCM_16:
1879 break;
1880 default:
1881 return EINVAL;
1882 }
1883 break;
1884 case CSP_MODE_ETA:
1885 switch (csp->csp_auth_alg) {
1886 case CRYPTO_SHA1_HMAC:
1887 case CRYPTO_SHA2_256_HMAC:
1888 case CRYPTO_SHA2_384_HMAC:
1889 case CRYPTO_SHA2_512_HMAC:
1890 switch (csp->csp_cipher_alg) {
1891 case CRYPTO_AES_CBC:
1892 case CRYPTO_AES_ICM:
1893 if (csp->csp_ivlen != AES_BLOCK_LEN)
1894 return EINVAL;
1895 break;
1896 case CRYPTO_AES_XTS:
1897 if (csp->csp_ivlen != AES_XTS_IV_LEN)
1898 return EINVAL;
1899 break;
1900 default:
1901 return EINVAL;
1902 }
1903 break;
1904 default:
1905 return EINVAL;
1906 }
1907 break;
1908 default:
1909 return EINVAL;
1910 }
1911
1912 return CRYPTODEV_PROBE_HARDWARE;
1913 }
1914
static int
qat_newsession(device_t dev, crypto_session_t cses,
    const struct crypto_session_params *csp)
{
	struct qat_crypto *qcy;
	struct qat_dmamem *qdm;
	struct qat_session *qs;
	struct qat_softc *sc;
	struct qat_crypto_desc *ddesc, *edesc;
	int error, slices;

	sc = device_get_softc(dev);
	qs = crypto_get_driver_session(cses);
	qcy = &sc->sc_crypto;

	qdm = &qs->qs_desc_mem;
	error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG,
	    sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN);
	if (error != 0)
		return error;

	mtx_init(&qs->qs_session_mtx, "qs session", NULL, MTX_DEF);
	qs->qs_aad_length = -1;

	qs->qs_dec_desc = ddesc = qdm->qdm_dma_vaddr;
	qs->qs_enc_desc = edesc = ddesc + 1;

	ddesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr;
	ddesc->qcd_hash_state_paddr = ddesc->qcd_desc_paddr +
	    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);
	edesc->qcd_desc_paddr = qdm->qdm_dma_seg.ds_addr +
	    sizeof(struct qat_crypto_desc);
	edesc->qcd_hash_state_paddr = edesc->qcd_desc_paddr +
	    offsetof(struct qat_crypto_desc, qcd_hash_state_prefix_buf);

	qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
	qs->qs_inflight = 0;

	qs->qs_cipher_key = csp->csp_cipher_key;
	qs->qs_cipher_klen = csp->csp_cipher_klen;
	qs->qs_auth_key = csp->csp_auth_key;
	qs->qs_auth_klen = csp->csp_auth_klen;

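	/* Map the requested cipher algorithm onto QAT hardware codes. */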
	switch (csp->csp_cipher_alg) {
	case CRYPTO_AES_CBC:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
		qs->qs_cipher_mode = HW_CIPHER_CBC_MODE;
		break;
	case CRYPTO_AES_ICM:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
		break;
	case CRYPTO_AES_XTS:
		qs->qs_cipher_algo =
		    qat_aes_cipher_algo(csp->csp_cipher_klen / 2);
		qs->qs_cipher_mode = HW_CIPHER_XTS_MODE;
		break;
	case CRYPTO_AES_NIST_GCM_16:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
		qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case 0:
		break;
	default:
		panic("%s: unhandled cipher algorithm %d", __func__,
		    csp->csp_cipher_alg);
	}

	switch (csp->csp_auth_alg) {
	case CRYPTO_SHA1_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA1:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_SHA2_256_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA2_256:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_SHA2_384_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA2_384:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_SHA2_512_HMAC:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
		qs->qs_auth_mode = HW_AUTH_MODE1;
		break;
	case CRYPTO_SHA2_512:
		qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
		qs->qs_auth_mode = HW_AUTH_MODE0;
		break;
	case CRYPTO_AES_NIST_GMAC:
		qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen);
		qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
		qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
		qs->qs_auth_mode = HW_AUTH_MODE1;

		qs->qs_cipher_key = qs->qs_auth_key;
		qs->qs_cipher_klen = qs->qs_auth_klen;
		break;
	case 0:
		break;
	default:
		panic("%s: unhandled auth algorithm %d", __func__,
		    csp->csp_auth_alg);
	}

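	/*
	 * Build the firmware slice chains.  The decrypt descriptor hashes
	 * before deciphering; the encrypt descriptor enciphers before
	 * hashing.  Both chains are terminated with a DRAM write-back slice.
	 */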
	slices = 0;
	switch (csp->csp_mode) {
	case CSP_MODE_AEAD:
	case CSP_MODE_ETA:
		/* auth then decrypt */
		ddesc->qcd_slices[0] = FW_SLICE_AUTH;
		ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
		ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
		ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
		/* encrypt then auth */
		edesc->qcd_slices[0] = FW_SLICE_CIPHER;
		edesc->qcd_slices[1] = FW_SLICE_AUTH;
		edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
		edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
		slices = 2;
		break;
	case CSP_MODE_CIPHER:
		/* decrypt */
		ddesc->qcd_slices[0] = FW_SLICE_CIPHER;
		ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
		ddesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
		/* encrypt */
		edesc->qcd_slices[0] = FW_SLICE_CIPHER;
		edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
		edesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
		slices = 1;
		break;
	case CSP_MODE_DIGEST:
		if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
			/* auth then decrypt */
			ddesc->qcd_slices[0] = FW_SLICE_AUTH;
			ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
			ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
			ddesc->qcd_cmd_id = FW_LA_CMD_HASH_CIPHER;
			/* encrypt then auth */
			edesc->qcd_slices[0] = FW_SLICE_CIPHER;
			edesc->qcd_slices[1] = FW_SLICE_AUTH;
			edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
			edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
			slices = 2;
		} else {
			ddesc->qcd_slices[0] = FW_SLICE_AUTH;
			ddesc->qcd_cmd_id = FW_LA_CMD_AUTH;
			edesc->qcd_slices[0] = FW_SLICE_AUTH;
			edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
			slices = 1;
		}
		break;
	default:
		panic("%s: unhandled crypto algorithm %d, %d", __func__,
		    csp->csp_cipher_alg, csp->csp_auth_alg);
	}
	ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
	edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;

	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc);
	qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc);

	if (csp->csp_auth_mlen != 0)
		qs->qs_auth_mlen = csp->csp_auth_mlen;
	else
		qs->qs_auth_mlen = edesc->qcd_auth_sz;

	/* Compute the GMAC by specifying a null cipher payload. */
	if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC)
		ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH;

	return 0;
}

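/*
 * Scrub the parts of a hardware descriptor that may contain key material.
 */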
static void
qat_crypto_clear_desc(struct qat_crypto_desc *desc)
{
	explicit_bzero(desc->qcd_content_desc, sizeof(desc->qcd_content_desc));
	explicit_bzero(desc->qcd_hash_state_prefix_buf,
	    sizeof(desc->qcd_hash_state_prefix_buf));
	explicit_bzero(desc->qcd_req_cache, sizeof(desc->qcd_req_cache));
}

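/*
 * Destroy a session: scrub key material from its descriptors and release
 * the descriptor DMA memory and the session mutex.  The session must have
 * no requests in flight.
 */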
static void
qat_freesession(device_t dev, crypto_session_t cses)
{
	struct qat_session *qs;

	qs = crypto_get_driver_session(cses);
	KASSERT(qs->qs_inflight == 0,
	    ("%s: session %p has requests in flight", __func__, qs));

	qat_crypto_clear_desc(qs->qs_enc_desc);
	qat_crypto_clear_desc(qs->qs_dec_desc);
	qat_free_dmamem(device_get_softc(dev), &qs->qs_desc_mem);
	mtx_destroy(&qs->qs_session_mtx);
}

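/*
 * Submit a symmetric crypto request: validate its size, account it against
 * the session, allocate a symmetric cookie, load the buffers for DMA, and
 * enqueue the firmware message on a transmit ring.  Completion is reported
 * asynchronously via crypto_done().
 */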
static int
qat_process(device_t dev, struct cryptop *crp, int hint)
{
	struct qat_crypto *qcy;
	struct qat_crypto_bank *qcb;
	struct qat_crypto_desc const *desc;
	struct qat_session *qs;
	struct qat_softc *sc;
	struct qat_sym_cookie *qsc;
	struct qat_sym_bulk_cookie *qsbc;
	int error;

	sc = device_get_softc(dev);
	qcy = &sc->sc_crypto;
	qs = crypto_get_driver_session(crp->crp_session);
	qsc = NULL;

	if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) {
		error = E2BIG;
		goto fail1;
	}

	mtx_lock(&qs->qs_session_mtx);
	if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
		if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) {
			error = E2BIG;
			mtx_unlock(&qs->qs_session_mtx);
			goto fail1;
		}

		/*
		 * The firmware interface for GCM annoyingly requires the AAD
		 * size to be stored in the session's content descriptor, which
		 * is not really meant to be updated after session
		 * initialization.  For IPSec the AAD size is fixed, so this is
		 * not much of a problem in practice, but we have to catch AAD
		 * size updates here so that the device code can safely update
		 * the session's recorded AAD size.
		 */
		if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) {
			if (qs->qs_inflight == 0) {
				if (qs->qs_aad_length != -1) {
					counter_u64_add(sc->sc_gcm_aad_updates,
					    1);
				}
				qs->qs_aad_length = crp->crp_aad_length;
			} else {
				qs->qs_need_wakeup = true;
				mtx_unlock(&qs->qs_session_mtx);
				counter_u64_add(sc->sc_gcm_aad_restarts, 1);
				error = ERESTART;
				goto fail1;
			}
		}
	}
	qs->qs_inflight++;
	mtx_unlock(&qs->qs_session_mtx);

	qcb = qat_crypto_select_bank(qcy);

	qsc = qat_crypto_alloc_sym_cookie(qcb);
	if (qsc == NULL) {
		counter_u64_add(sc->sc_sym_alloc_failures, 1);
		error = ENOBUFS;
		goto fail2;
	}

	if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
		desc = qs->qs_enc_desc;
	else
		desc = qs->qs_dec_desc;

	error = qat_crypto_load(qs, qsc, desc, crp);
	if (error != 0)
		goto fail2;

	qsbc = &qsc->qsc_bulk_cookie;
	qsbc->qsbc_crypto = qcy;
	qsbc->qsbc_session = qs;
	qsbc->qsbc_cb_tag = crp;

	sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp);

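	/*
	 * Synchronize the AAD, input, and (separate) output buffers, along
	 * with the cookie itself, before handing the request to the device.
	 */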
	if (crp->crp_aad != NULL) {
		bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dma_tag,
		    qsc->qsc_dma[QAT_SYM_DMA_AADBUF].qsd_dmamap,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dma_tag,
	    qsc->qsc_dma[QAT_SYM_DMA_BUF].qsd_dmamap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	if (CRYPTO_HAS_OUTPUT_BUFFER(crp)) {
		bus_dmamap_sync(qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dma_tag,
		    qsc->qsc_dma[QAT_SYM_DMA_OBUF].qsd_dmamap,
		    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
	}
	bus_dmamap_sync(qsc->qsc_self_dma_tag, qsc->qsc_self_dmamap,
	    BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);

	error = qat_etr_put_msg(sc, qcb->qcb_sym_tx,
	    (uint32_t *)qsbc->qsbc_msg);
	if (error)
		goto fail2;

	return 0;

fail2:
	if (qsc)
		qat_crypto_free_sym_cookie(qcb, qsc);
	mtx_lock(&qs->qs_session_mtx);
	qs->qs_inflight--;
	mtx_unlock(&qs->qs_session_mtx);
fail1:
	crp->crp_etype = error;
	crypto_done(crp);
	return 0;
}

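/* Newbus and cryptodev entry points for the qat_c2xxx driver. */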
static device_method_t qat_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		qat_probe),
	DEVMETHOD(device_attach,	qat_attach),
	DEVMETHOD(device_detach,	qat_detach),

	/* Cryptodev interface */
	DEVMETHOD(cryptodev_probesession, qat_probesession),
	DEVMETHOD(cryptodev_newsession,	qat_newsession),
	DEVMETHOD(cryptodev_freesession, qat_freesession),
	DEVMETHOD(cryptodev_process,	qat_process),

	DEVMETHOD_END
};

static driver_t qat_driver = {
	.name		= "qat_c2xxx",
	.methods	= qat_methods,
	.size		= sizeof(struct qat_softc),
};

DRIVER_MODULE(qat_c2xxx, pci, qat_driver, 0, 0);
MODULE_VERSION(qat_c2xxx, 1);
MODULE_DEPEND(qat_c2xxx, crypto, 1, 1, 1);
MODULE_DEPEND(qat_c2xxx, firmware, 1, 1, 1);
MODULE_DEPEND(qat_c2xxx, pci, 1, 1, 1);