1 /*-
2 * Copyright (c) 2017 Chelsio Communications, Inc.
3 * Copyright (c) 2021 The FreeBSD Foundation
4 * All rights reserved.
5 * Written by: John Baldwin <jhb@FreeBSD.org>
6 *
7 * Portions of this software were developed by Ararat River
8 * Consulting, LLC under sponsorship of the FreeBSD Foundation.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34
35 #include <sys/types.h>
36 #include <sys/bus.h>
37 #include <sys/lock.h>
38 #include <sys/malloc.h>
39 #include <sys/mutex.h>
40 #include <sys/module.h>
41 #include <sys/sglist.h>
42
43 #include <opencrypto/cryptodev.h>
44 #include <opencrypto/xform.h>
45
46 #include "cryptodev_if.h"
47
48 #include "common/common.h"
49 #include "crypto/t4_crypto.h"
50
51 /*
52 * Requests consist of:
53 *
54 * +-------------------------------+
55 * | struct fw_crypto_lookaside_wr |
56 * +-------------------------------+
57 * | struct ulp_txpkt |
58 * +-------------------------------+
59 * | struct ulptx_idata |
60 * +-------------------------------+
61 * | struct cpl_tx_sec_pdu |
62 * +-------------------------------+
63 * | struct cpl_tls_tx_scmd_fmt |
64 * +-------------------------------+
65 * | key context header |
66 * +-------------------------------+
67 * | AES key | ----- For requests with AES
68 * +-------------------------------+
69 * | Hash state | ----- For hash-only requests
70 * +-------------------------------+ -
71 * | IPAD (16-byte aligned) | \
72 * +-------------------------------+ +---- For requests with HMAC
73 * | OPAD (16-byte aligned) | /
74 * +-------------------------------+ -
75 * | GMAC H | ----- For AES-GCM
76 * +-------------------------------+ -
77 * | struct cpl_rx_phys_dsgl | \
78 * +-------------------------------+ +---- Destination buffer for
79 * | PHYS_DSGL entries | / non-hash-only requests
80 * +-------------------------------+ -
81 * | 16 dummy bytes | ----- Only for HMAC/hash-only requests
82 * +-------------------------------+
83 * | IV | ----- If immediate IV
84 * +-------------------------------+
85 * | Payload | ----- If immediate Payload
86 * +-------------------------------+ -
87 * | struct ulptx_sgl | \
88 * +-------------------------------+ +---- If payload via SGL
89 * | SGL entries | /
90 * +-------------------------------+ -
91 *
92 * Note that the key context must be padded to ensure 16-byte alignment.
93 * For HMAC requests, the key consists of the partial hash of the IPAD
94 * followed by the partial hash of the OPAD.
95 *
96 * Replies consist of:
97 *
98 * +-------------------------------+
99 * | struct cpl_fw6_pld |
100 * +-------------------------------+
101 * | hash digest | ----- For HMAC request with
102 * +-------------------------------+ 'hash_size' set in work request
103 *
104 * A 32-bit big-endian error status word is supplied in the last 4
105  * bytes of data[0] in the CPL_FW6_PLD message.  Bit 0 indicates a
106 * "MAC" error and bit 1 indicates a "PAD" error.
107 *
108 * The 64-bit 'cookie' field from the fw_crypto_lookaside_wr message
109 * in the request is returned in data[1] of the CPL_FW6_PLD message.
110 *
111 * For block cipher replies, the updated IV is supplied in data[2] and
112 * data[3] of the CPL_FW6_PLD message.
113 *
114 * For hash replies where the work request set 'hash_size' to request
115 * a copy of the hash in the reply, the hash digest is supplied
116 * immediately following the CPL_FW6_PLD message.
117 */
118
119 /*
120 * The crypto engine supports a maximum AAD size of 511 bytes.
121 */
122 #define MAX_AAD_LEN 511
123
124 /*
125  * The documentation for CPL_RX_PHYS_DSGL claims a maximum of 32 SG
126  * entries.  Although the CPL includes a 16-bit length field, the T6 can
127  * sometimes hang if an error occurs while processing a request with a
128  * single DSGL entry larger than 2k, so each entry is capped at 2KB.
129 */
130 #define MAX_RX_PHYS_DSGL_SGE 32
131 #define DSGL_SGE_MAXLEN 2048
132
133 /*
134 * The adapter only supports requests with a total input or output
135  * length of 64k-1 or smaller.  Longer requests result in either hung
136  * requests or incorrect results.
137 */
138 #define MAX_REQUEST_SIZE 65535
139
140 static MALLOC_DEFINE(M_CCR, "ccr", "Chelsio T6 crypto");
141
142 struct ccr_session_hmac {
143 const struct auth_hash *auth_hash;
144 int hash_len;
145 unsigned int partial_digest_len;
146 unsigned int auth_mode;
147 unsigned int mk_size;
148 char pads[CHCR_HASH_MAX_BLOCK_SIZE_128 * 2];
149 };
150
151 struct ccr_session_gmac {
152 int hash_len;
153 char ghash_h[GMAC_BLOCK_LEN];
154 };
155
156 struct ccr_session_ccm_mac {
157 int hash_len;
158 };
159
160 struct ccr_session_cipher {
161 unsigned int cipher_mode;
162 unsigned int key_len;
163 unsigned int iv_len;
164 __be32 key_ctx_hdr;
165 char enckey[CHCR_AES_MAX_KEY_LEN];
166 char deckey[CHCR_AES_MAX_KEY_LEN];
167 };
168
169 struct ccr_port {
170 struct sge_wrq *txq;
171 struct sge_rxq *rxq;
172 int rx_channel_id;
173 int tx_channel_id;
174 u_int active_sessions;
175
176 counter_u64_t stats_queued;
177 counter_u64_t stats_completed;
178 };
179
180 struct ccr_softc {
181 struct adapter *adapter;
182 device_t dev;
183 uint32_t cid;
184 struct mtx lock;
185 bool detaching;
186 struct ccr_port ports[MAX_NPORTS];
187 u_int port_mask;
188 int first_rxq_id;
189
190 /*
191 	 * Pre-allocated dummy output buffer for the IV and AAD of
192 	 * AEAD requests.
193 */
194 char *iv_aad_buf;
195 struct sglist *sg_iv_aad;
196
197 /* Statistics. */
198 counter_u64_t stats_cipher_encrypt;
199 counter_u64_t stats_cipher_decrypt;
200 counter_u64_t stats_hash;
201 counter_u64_t stats_hmac;
202 counter_u64_t stats_eta_encrypt;
203 counter_u64_t stats_eta_decrypt;
204 counter_u64_t stats_gcm_encrypt;
205 counter_u64_t stats_gcm_decrypt;
206 counter_u64_t stats_ccm_encrypt;
207 counter_u64_t stats_ccm_decrypt;
208 counter_u64_t stats_wr_nomem;
209 counter_u64_t stats_inflight;
210 counter_u64_t stats_mac_error;
211 counter_u64_t stats_pad_error;
212 counter_u64_t stats_sglist_error;
213 counter_u64_t stats_process_error;
214 counter_u64_t stats_sw_fallback;
215
216 struct sysctl_ctx_list ctx;
217 };
218
219 struct ccr_session {
220 #ifdef INVARIANTS
221 int pending;
222 #endif
223 enum { HASH, HMAC, CIPHER, ETA, GCM, CCM } mode;
224 struct ccr_softc *sc;
225 struct ccr_port *port;
226 union {
227 struct ccr_session_hmac hmac;
228 struct ccr_session_gmac gmac;
229 struct ccr_session_ccm_mac ccm_mac;
230 };
231 struct ccr_session_cipher cipher;
232 struct mtx lock;
233
234 /*
235 * A fallback software session is used for certain GCM/CCM
236 	 * requests that the hardware can't handle, such as requests
237 * with only AAD and no payload.
238 */
239 crypto_session_t sw_session;
240
241 /*
242 * Pre-allocate S/G lists used when preparing a work request.
243 * 'sg_input' contains an sglist describing the entire input
244 * buffer for a 'struct cryptop'. 'sg_output' contains an
245 * sglist describing the entire output buffer. 'sg_ulptx' is
246 * used to describe the data the engine should DMA as input
247 * via ULPTX_SGL. 'sg_dsgl' is used to describe the
248 * destination that cipher text and a tag should be written
249 * to.
250 */
251 struct sglist *sg_input;
252 struct sglist *sg_output;
253 struct sglist *sg_ulptx;
254 struct sglist *sg_dsgl;
255 };
256
257 /*
258  * Crypto requests involve two kinds of scatter/gather lists.
259 *
260 * Non-hash-only requests require a PHYS_DSGL that describes the
261 * location to store the results of the encryption or decryption
262  * operation.  This SGL should exclude the skip bytes at the start of
263  * the data as well as any AAD
264 * or IV. For authenticated encryption requests it should include the
265 * destination of the hash or tag.
266 *
267  * The input payload may be supplied either inline as immediate data
268  * or via a standard ULP_TX SGL.  This SGL should include AAD,
269 * ciphertext, and the hash or tag for authenticated decryption
270 * requests.
271 *
272 * These scatter/gather lists can describe different subsets of the
273 * buffers described by the crypto operation. ccr_populate_sglist()
274 * generates a scatter/gather list that covers an entire crypto
275 * operation buffer that is then used to construct the other
276 * scatter/gather lists.
277 */
278 static int
279 ccr_populate_sglist(struct sglist *sg, struct crypto_buffer *cb)
280 {
281 int error;
282
283 sglist_reset(sg);
284 switch (cb->cb_type) {
285 case CRYPTO_BUF_MBUF:
286 error = sglist_append_mbuf(sg, cb->cb_mbuf);
287 break;
288 case CRYPTO_BUF_SINGLE_MBUF:
289 error = sglist_append_single_mbuf(sg, cb->cb_mbuf);
290 break;
291 case CRYPTO_BUF_UIO:
292 error = sglist_append_uio(sg, cb->cb_uio);
293 break;
294 case CRYPTO_BUF_CONTIG:
295 error = sglist_append(sg, cb->cb_buf, cb->cb_buf_len);
296 break;
297 case CRYPTO_BUF_VMPAGE:
298 error = sglist_append_vmpages(sg, cb->cb_vm_page,
299 cb->cb_vm_page_len, cb->cb_vm_page_offset);
300 break;
301 default:
302 error = EINVAL;
303 }
304 return (error);
305 }
306
307 /*
308 * Segments in 'sg' larger than 'maxsegsize' are counted as multiple
309 * segments.
310 */
311 static int
312 ccr_count_sgl(struct sglist *sg, int maxsegsize)
313 {
314 int i, nsegs;
315
316 nsegs = 0;
317 for (i = 0; i < sg->sg_nseg; i++)
318 nsegs += howmany(sg->sg_segs[i].ss_len, maxsegsize);
319 return (nsegs);
320 }
321
322 /* These functions deal with PHYS_DSGL for the reply buffer. */
323 static inline int
324 ccr_phys_dsgl_len(int nsegs)
325 {
326 int len;
327
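	/*
	 * Each full group of 8 entries consumes an entire struct
	 * phys_sge_pairs (8 addresses and 8 16-bit lengths).  A partial
	 * final group still consumes all 8 length slots, with the
	 * address slots rounded up to an even count to preserve 16-byte
	 * alignment.
	 */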
328 len = (nsegs / 8) * sizeof(struct phys_sge_pairs);
329 if ((nsegs % 8) != 0) {
330 len += sizeof(uint16_t) * 8;
331 len += roundup2(nsegs % 8, 2) * sizeof(uint64_t);
332 }
333 return (len);
334 }
335
336 static void
337 ccr_write_phys_dsgl(struct ccr_session *s, void *dst, int nsegs)
338 {
339 struct sglist *sg;
340 struct cpl_rx_phys_dsgl *cpl;
341 struct phys_sge_pairs *sgl;
342 vm_paddr_t paddr;
343 size_t seglen;
344 u_int i, j;
345
346 sg = s->sg_dsgl;
347 cpl = dst;
348 cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
349 V_CPL_RX_PHYS_DSGL_ISRDMA(0));
350 cpl->pcirlxorder_to_noofsgentr = htobe32(
351 V_CPL_RX_PHYS_DSGL_PCIRLXORDER(0) |
352 V_CPL_RX_PHYS_DSGL_PCINOSNOOP(0) |
353 V_CPL_RX_PHYS_DSGL_PCITPHNTENB(0) | V_CPL_RX_PHYS_DSGL_DCAID(0) |
354 V_CPL_RX_PHYS_DSGL_NOOFSGENTR(nsegs));
355 cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
356 cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
357 cpl->rss_hdr_int.hash_val = 0;
358 cpl->rss_hdr_int.channel = s->port->rx_channel_id;
359 sgl = (struct phys_sge_pairs *)(cpl + 1);
360 j = 0;
361 for (i = 0; i < sg->sg_nseg; i++) {
362 seglen = sg->sg_segs[i].ss_len;
363 paddr = sg->sg_segs[i].ss_paddr;
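		/*
		 * Split segments larger than DSGL_SGE_MAXLEN into
		 * multiple DSGL entries to avoid the T6 hang described
		 * above.
		 */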
364 do {
365 sgl->addr[j] = htobe64(paddr);
366 if (seglen > DSGL_SGE_MAXLEN) {
367 sgl->len[j] = htobe16(DSGL_SGE_MAXLEN);
368 paddr += DSGL_SGE_MAXLEN;
369 seglen -= DSGL_SGE_MAXLEN;
370 } else {
371 sgl->len[j] = htobe16(seglen);
372 seglen = 0;
373 }
374 j++;
375 if (j == 8) {
376 sgl++;
377 j = 0;
378 }
379 } while (seglen != 0);
380 }
381 MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl + 1)) == nsegs);
382 }
383
384 /* These functions deal with the ULPTX_SGL for input payload. */
385 static inline int
386 ccr_ulptx_sgl_len(int nsegs)
387 {
388 u_int n;
389
390 nsegs--; /* first segment is part of ulptx_sgl */
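	/*
	 * The remaining segments are packed two per ulptx_sge_pair (24
	 * bytes); an odd trailing segment still takes 16 bytes.  The
	 * total is padded to a 16-byte boundary.
	 */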
391 n = sizeof(struct ulptx_sgl) + 8 * ((3 * nsegs) / 2 + (nsegs & 1));
392 return (roundup2(n, 16));
393 }
394
395 static void
396 ccr_write_ulptx_sgl(struct ccr_session *s, void *dst, int nsegs)
397 {
398 struct ulptx_sgl *usgl;
399 struct sglist *sg;
400 struct sglist_seg *ss;
401 int i;
402
403 sg = s->sg_ulptx;
404 MPASS(nsegs == sg->sg_nseg);
405 ss = &sg->sg_segs[0];
406 usgl = dst;
407 usgl->cmd_nsge = htobe32(V_ULPTX_CMD(ULP_TX_SC_DSGL) |
408 V_ULPTX_NSGE(nsegs));
409 usgl->len0 = htobe32(ss->ss_len);
410 usgl->addr0 = htobe64(ss->ss_paddr);
411 ss++;
412 for (i = 0; i < sg->sg_nseg - 1; i++) {
413 usgl->sge[i / 2].len[i & 1] = htobe32(ss->ss_len);
414 usgl->sge[i / 2].addr[i & 1] = htobe64(ss->ss_paddr);
415 ss++;
416 }
417 }
418
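/*
 * Returns true if the input payload can be passed inline in the work
 * request as immediate data.  Both the absolute immediate data limit
 * and the maximum work request length must be respected.
 */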
419 static bool
420 ccr_use_imm_data(u_int transhdr_len, u_int input_len)
421 {
422
423 if (input_len > CRYPTO_MAX_IMM_TX_PKT_LEN)
424 return (false);
425 if (roundup2(transhdr_len, 16) + roundup2(input_len, 16) >
426 SGE_MAX_WR_LEN)
427 return (false);
428 return (true);
429 }
430
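/*
 * Fill in the portions of a work request that are common to all
 * request types: the FW_CRYPTO_LOOKASIDE_WR header, the ULP_TXPKT
 * command, and the ULPTX_IDATA sub-command covering the immediate
 * data.
 */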
431 static void
432 ccr_populate_wreq(struct ccr_softc *sc, struct ccr_session *s,
433 struct chcr_wr *crwr, u_int kctx_len, u_int wr_len, u_int imm_len,
434 u_int sgl_len, u_int hash_size, struct cryptop *crp)
435 {
436 u_int cctx_size, idata_len;
437
438 cctx_size = sizeof(struct _key_ctx) + kctx_len;
439 crwr->wreq.op_to_cctx_size = htobe32(
440 V_FW_CRYPTO_LOOKASIDE_WR_OPCODE(FW_CRYPTO_LOOKASIDE_WR) |
441 V_FW_CRYPTO_LOOKASIDE_WR_COMPL(0) |
442 V_FW_CRYPTO_LOOKASIDE_WR_IMM_LEN(imm_len) |
443 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_LOC(1) |
444 V_FW_CRYPTO_LOOKASIDE_WR_CCTX_SIZE(cctx_size >> 4));
445 crwr->wreq.len16_pkd = htobe32(
446 V_FW_CRYPTO_LOOKASIDE_WR_LEN16(wr_len / 16));
447 crwr->wreq.session_id = 0;
448 crwr->wreq.rx_chid_to_rx_q_id = htobe32(
449 V_FW_CRYPTO_LOOKASIDE_WR_RX_CHID(s->port->rx_channel_id) |
450 V_FW_CRYPTO_LOOKASIDE_WR_LCB(0) |
451 V_FW_CRYPTO_LOOKASIDE_WR_PHASH(0) |
452 V_FW_CRYPTO_LOOKASIDE_WR_IV(IV_NOP) |
453 V_FW_CRYPTO_LOOKASIDE_WR_FQIDX(0) |
454 V_FW_CRYPTO_LOOKASIDE_WR_TX_CH(0) | /* unused in firmware */
455 V_FW_CRYPTO_LOOKASIDE_WR_RX_Q_ID(s->port->rxq->iq.abs_id));
456 crwr->wreq.key_addr = 0;
457 crwr->wreq.pld_size_hash_size = htobe32(
458 V_FW_CRYPTO_LOOKASIDE_WR_PLD_SIZE(sgl_len) |
459 V_FW_CRYPTO_LOOKASIDE_WR_HASH_SIZE(hash_size));
460 crwr->wreq.cookie = htobe64((uintptr_t)crp);
461
462 crwr->ulptx.cmd_dest = htobe32(V_ULPTX_CMD(ULP_TX_PKT) |
463 V_ULP_TXPKT_DATAMODIFY(0) |
464 V_ULP_TXPKT_CHANNELID(s->port->tx_channel_id) |
465 V_ULP_TXPKT_DEST(0) |
466 V_ULP_TXPKT_FID(sc->first_rxq_id) | V_ULP_TXPKT_RO(1));
467 crwr->ulptx.len = htobe32(
468 ((wr_len - sizeof(struct fw_crypto_lookaside_wr)) / 16));
469
470 crwr->sc_imm.cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM) |
471 V_ULP_TX_SC_MORE(sgl_len != 0 ? 1 : 0));
472 idata_len = wr_len - offsetof(struct chcr_wr, sec_cpl) - sgl_len;
473 if (imm_len % 16 != 0)
474 idata_len -= 16 - imm_len % 16;
475 crwr->sc_imm.len = htobe32(idata_len);
476 }
477
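/*
 * Handle a plain hash or HMAC request.  The payload is passed either
 * as immediate data or via a ULPTX_SGL.  A request with an empty
 * payload is handled by passing a single padded block as immediate
 * data.
 */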
478 static int
479 ccr_hash(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
480 {
481 struct chcr_wr *crwr;
482 struct wrqe *wr;
483 const struct auth_hash *axf;
484 char *dst;
485 u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
486 u_int hmac_ctrl, imm_len, iopad_size;
487 int error, sgl_nsegs, sgl_len, use_opad;
488
489 	/* Reject requests with an input buffer that is too large. */
490 if (crp->crp_payload_length > MAX_REQUEST_SIZE)
491 return (EFBIG);
492
493 axf = s->hmac.auth_hash;
494
495 if (s->mode == HMAC) {
496 use_opad = 1;
497 hmac_ctrl = SCMD_HMAC_CTRL_NO_TRUNC;
498 } else {
499 use_opad = 0;
500 hmac_ctrl = SCMD_HMAC_CTRL_NOP;
501 }
502
503 /* PADs must be 128-bit aligned. */
504 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
505
506 /*
507 * The 'key' part of the context includes the aligned IPAD and
508 * OPAD.
509 */
510 kctx_len = iopad_size;
511 if (use_opad)
512 kctx_len += iopad_size;
513 hash_size_in_response = axf->hashsize;
514 transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
515
516 if (crp->crp_payload_length == 0) {
517 imm_len = axf->blocksize;
518 sgl_nsegs = 0;
519 sgl_len = 0;
520 } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
521 imm_len = crp->crp_payload_length;
522 sgl_nsegs = 0;
523 sgl_len = 0;
524 } else {
525 imm_len = 0;
526 sglist_reset(s->sg_ulptx);
527 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
528 crp->crp_payload_start, crp->crp_payload_length);
529 if (error)
530 return (error);
531 sgl_nsegs = s->sg_ulptx->sg_nseg;
532 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
533 }
534
535 wr_len = roundup2(transhdr_len, 16) + roundup2(imm_len, 16) + sgl_len;
536 if (wr_len > SGE_MAX_WR_LEN)
537 return (EFBIG);
538 wr = alloc_wrqe(wr_len, s->port->txq);
539 if (wr == NULL) {
540 counter_u64_add(sc->stats_wr_nomem, 1);
541 return (ENOMEM);
542 }
543 crwr = wrtod(wr);
544 memset(crwr, 0, wr_len);
545
546 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
547 hash_size_in_response, crp);
548
549 crwr->sec_cpl.op_ivinsrtofst = htobe32(
550 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
551 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
552 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
553 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
554 V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
555
556 crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
557 axf->blocksize : crp->crp_payload_length);
558
559 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
560 V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));
561
562 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
563 crwr->sec_cpl.seqno_numivs = htobe32(
564 V_SCMD_SEQ_NO_CTRL(0) |
565 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
566 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_NOP) |
567 V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
568 V_SCMD_HMAC_CTRL(hmac_ctrl));
569 crwr->sec_cpl.ivgen_hdrlen = htobe32(
570 V_SCMD_LAST_FRAG(0) |
571 V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
572 V_SCMD_MAC_ONLY(1));
573
574 memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);
575
576 /* XXX: F_KEY_CONTEXT_SALT_PRESENT set, but 'salt' not set. */
577 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
578 crwr->key_ctx.ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
579 V_KEY_CONTEXT_OPAD_PRESENT(use_opad) |
580 V_KEY_CONTEXT_SALT_PRESENT(1) |
581 V_KEY_CONTEXT_CK_SIZE(CHCR_KEYCTX_NO_KEY) |
582 V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
583
584 dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
585 if (crp->crp_payload_length == 0) {
586 dst[0] = 0x80;
587 if (s->mode == HMAC)
588 *(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
589 htobe64(axf->blocksize << 3);
590 } else if (imm_len != 0)
591 crypto_copydata(crp, crp->crp_payload_start,
592 crp->crp_payload_length, dst);
593 else
594 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
595
596 /* XXX: TODO backpressure */
597 t4_wrq_tx(sc->adapter, wr);
598
599 return (0);
600 }
601
602 static int
603 ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
604 const struct cpl_fw6_pld *cpl, int error)
605 {
606 uint8_t hash[HASH_MAX_LEN];
607
608 if (error)
609 return (error);
610
611 if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
612 crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
613 hash);
614 if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
615 return (EBADMSG);
616 } else
617 crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
618 (cpl + 1));
619 return (0);
620 }
621
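/*
 * Handle a plain cipher request (AES-CBC, AES-CTR, or AES-XTS) with no
 * authentication.  The destination is described by a PHYS_DSGL and the
 * input payload is passed either as immediate data or via a ULPTX_SGL.
 */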
622 static int
623 ccr_cipher(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
624 {
625 char iv[CHCR_MAX_CRYPTO_IV_LEN];
626 struct chcr_wr *crwr;
627 struct wrqe *wr;
628 char *dst;
629 u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
630 u_int imm_len, iv_len;
631 int dsgl_nsegs, dsgl_len;
632 int sgl_nsegs, sgl_len;
633 int error;
634
635 if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
636 return (EINVAL);
637 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
638 (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
639 return (EINVAL);
640
641 	/* Reject requests with an input buffer that is too large. */
642 if (crp->crp_payload_length > MAX_REQUEST_SIZE)
643 return (EFBIG);
644
645 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
646 op_type = CHCR_ENCRYPT_OP;
647 else
648 op_type = CHCR_DECRYPT_OP;
649
650 sglist_reset(s->sg_dsgl);
651 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
652 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
653 crp->crp_payload_output_start, crp->crp_payload_length);
654 else
655 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
656 crp->crp_payload_start, crp->crp_payload_length);
657 if (error)
658 return (error);
659 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
660 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
661 return (EFBIG);
662 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
663
664 /* The 'key' must be 128-bit aligned. */
665 kctx_len = roundup2(s->cipher.key_len, 16);
666 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
667
668 /* For AES-XTS we send a 16-byte IV in the work request. */
669 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
670 iv_len = AES_BLOCK_LEN;
671 else
672 iv_len = s->cipher.iv_len;
673
674 if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
675 imm_len = crp->crp_payload_length;
676 sgl_nsegs = 0;
677 sgl_len = 0;
678 } else {
679 imm_len = 0;
680 sglist_reset(s->sg_ulptx);
681 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
682 crp->crp_payload_start, crp->crp_payload_length);
683 if (error)
684 return (error);
685 sgl_nsegs = s->sg_ulptx->sg_nseg;
686 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
687 }
688
689 wr_len = roundup2(transhdr_len, 16) + iv_len +
690 roundup2(imm_len, 16) + sgl_len;
691 if (wr_len > SGE_MAX_WR_LEN)
692 return (EFBIG);
693 wr = alloc_wrqe(wr_len, s->port->txq);
694 if (wr == NULL) {
695 counter_u64_add(sc->stats_wr_nomem, 1);
696 return (ENOMEM);
697 }
698 crwr = wrtod(wr);
699 memset(crwr, 0, wr_len);
700
701 crypto_read_iv(crp, iv);
702
703 /* Zero the remainder of the IV for AES-XTS. */
704 memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);
705
706 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
707 crp);
708
709 crwr->sec_cpl.op_ivinsrtofst = htobe32(
710 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
711 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
712 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
713 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
714 V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
715
716 crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);
717
718 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
719 V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
720 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
721 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
722 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));
723
724 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
725 crwr->sec_cpl.seqno_numivs = htobe32(
726 V_SCMD_SEQ_NO_CTRL(0) |
727 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
728 V_SCMD_ENC_DEC_CTRL(op_type) |
729 V_SCMD_CIPH_MODE(s->cipher.cipher_mode) |
730 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
731 V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
732 V_SCMD_IV_SIZE(iv_len / 2) |
733 V_SCMD_NUM_IVS(0));
734 crwr->sec_cpl.ivgen_hdrlen = htobe32(
735 V_SCMD_IV_GEN_CTRL(0) |
736 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
737 V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
738
739 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
740 switch (s->cipher.cipher_mode) {
741 case SCMD_CIPH_MODE_AES_CBC:
742 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
743 memcpy(crwr->key_ctx.key, s->cipher.enckey,
744 s->cipher.key_len);
745 else
746 memcpy(crwr->key_ctx.key, s->cipher.deckey,
747 s->cipher.key_len);
748 break;
749 case SCMD_CIPH_MODE_AES_CTR:
750 memcpy(crwr->key_ctx.key, s->cipher.enckey,
751 s->cipher.key_len);
752 break;
753 case SCMD_CIPH_MODE_AES_XTS:
754 key_half = s->cipher.key_len / 2;
755 memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
756 key_half);
757 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
758 memcpy(crwr->key_ctx.key + key_half,
759 s->cipher.enckey, key_half);
760 else
761 memcpy(crwr->key_ctx.key + key_half,
762 s->cipher.deckey, key_half);
763 break;
764 }
765
766 dst = (char *)(crwr + 1) + kctx_len;
767 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
768 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
769 memcpy(dst, iv, iv_len);
770 dst += iv_len;
771 if (imm_len != 0)
772 crypto_copydata(crp, crp->crp_payload_start,
773 crp->crp_payload_length, dst);
774 else
775 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
776
777 /* XXX: TODO backpressure */
778 t4_wrq_tx(sc->adapter, wr);
779
780 explicit_bzero(iv, sizeof(iv));
781 return (0);
782 }
783
784 static int
785 ccr_cipher_done(struct ccr_softc *sc, struct ccr_session *s,
786 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
787 {
788
789 /*
790 * The updated IV to permit chained requests is at
791 * cpl->data[2], but OCF doesn't permit chained requests.
792 */
793 return (error);
794 }
795
796 /*
797 * 'hashsize' is the length of a full digest. 'authsize' is the
798  * requested digest length for this operation, which may be less
799 * than 'hashsize'.
800 */
801 static int
802 ccr_hmac_ctrl(unsigned int hashsize, unsigned int authsize)
803 {
804
805 if (authsize == 10)
806 return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
807 if (authsize == 12)
808 return (SCMD_HMAC_CTRL_IPSEC_96BIT);
809 if (authsize == hashsize / 2)
810 return (SCMD_HMAC_CTRL_DIV2);
811 return (SCMD_HMAC_CTRL_NO_TRUNC);
812 }
813
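/*
 * Handle an ETA (encrypt-then-authenticate) request pairing one of the
 * AES cipher modes with an HMAC.
 */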
814 static int
815 ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
816 {
817 char iv[CHCR_MAX_CRYPTO_IV_LEN];
818 struct chcr_wr *crwr;
819 struct wrqe *wr;
820 const struct auth_hash *axf;
821 char *dst;
822 u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
823 u_int hash_size_in_response, imm_len, iopad_size, iv_len;
824 u_int aad_start, aad_stop;
825 u_int auth_insert;
826 u_int cipher_start, cipher_stop;
827 u_int hmac_ctrl, input_len;
828 int dsgl_nsegs, dsgl_len;
829 int sgl_nsegs, sgl_len;
830 int error;
831
832 /*
833 * If there is a need in the future, requests with an empty
834 * payload could be supported as HMAC-only requests.
835 */
836 if (s->cipher.key_len == 0 || crp->crp_payload_length == 0)
837 return (EINVAL);
838 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
839 (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
840 return (EINVAL);
841
842 /* For AES-XTS we send a 16-byte IV in the work request. */
843 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
844 iv_len = AES_BLOCK_LEN;
845 else
846 iv_len = s->cipher.iv_len;
847
848 if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
849 return (EINVAL);
850
851 axf = s->hmac.auth_hash;
852 hash_size_in_response = s->hmac.hash_len;
853 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
854 op_type = CHCR_ENCRYPT_OP;
855 else
856 op_type = CHCR_DECRYPT_OP;
857
858 /*
859 * The output buffer consists of the cipher text followed by
860 * the hash when encrypting. For decryption it only contains
861 * the plain text.
862 *
863 	 * Due to a firmware bug, the destination S/G list must begin
864 	 * with a dummy buffer covering the IV and AAD, placed ahead of
865 	 * the real output buffer.
866 */
867 if (op_type == CHCR_ENCRYPT_OP) {
868 if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
869 hash_size_in_response > MAX_REQUEST_SIZE)
870 return (EFBIG);
871 } else {
872 if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
873 MAX_REQUEST_SIZE)
874 return (EFBIG);
875 }
876 sglist_reset(s->sg_dsgl);
877 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0,
878 iv_len + crp->crp_aad_length);
879 if (error)
880 return (error);
881 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
882 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
883 crp->crp_payload_output_start, crp->crp_payload_length);
884 else
885 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
886 crp->crp_payload_start, crp->crp_payload_length);
887 if (error)
888 return (error);
889 if (op_type == CHCR_ENCRYPT_OP) {
890 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
891 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
892 crp->crp_digest_start, hash_size_in_response);
893 else
894 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
895 crp->crp_digest_start, hash_size_in_response);
896 if (error)
897 return (error);
898 }
899 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
900 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
901 return (EFBIG);
902 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
903
904 /* PADs must be 128-bit aligned. */
905 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
906
907 /*
908 * The 'key' part of the key context consists of the key followed
909 * by the IPAD and OPAD.
910 */
911 kctx_len = roundup2(s->cipher.key_len, 16) + iopad_size * 2;
912 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
913
914 /*
915 * The input buffer consists of the IV, any AAD, and then the
916 * cipher/plain text. For decryption requests the hash is
917 * appended after the cipher text.
918 *
919 * The IV is always stored at the start of the input buffer
920 * even though it may be duplicated in the payload. The
921 * crypto engine doesn't work properly if the IV offset points
922 * inside of the AAD region, so a second copy is always
923 * required.
924 */
925 input_len = crp->crp_aad_length + crp->crp_payload_length;
926
927 /*
928 	 * The firmware hangs if sent a request whose length is only
929 	 * slightly smaller than MAX_REQUEST_SIZE.  In particular, the
930 	 * firmware appears to require 512 - 16 bytes of spare room in
931 	 * addition to the size of the hash, even if the hash isn't
932 * included in the input buffer.
933 */
934 if (input_len + roundup2(axf->hashsize, 16) + (512 - 16) >
935 MAX_REQUEST_SIZE)
936 return (EFBIG);
937 if (op_type == CHCR_DECRYPT_OP)
938 input_len += hash_size_in_response;
939
940 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
941 imm_len = input_len;
942 sgl_nsegs = 0;
943 sgl_len = 0;
944 } else {
945 imm_len = 0;
946 sglist_reset(s->sg_ulptx);
947 if (crp->crp_aad_length != 0) {
948 if (crp->crp_aad != NULL)
949 error = sglist_append(s->sg_ulptx,
950 crp->crp_aad, crp->crp_aad_length);
951 else
952 error = sglist_append_sglist(s->sg_ulptx,
953 s->sg_input, crp->crp_aad_start,
954 crp->crp_aad_length);
955 if (error)
956 return (error);
957 }
958 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
959 crp->crp_payload_start, crp->crp_payload_length);
960 if (error)
961 return (error);
962 if (op_type == CHCR_DECRYPT_OP) {
963 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
964 crp->crp_digest_start, hash_size_in_response);
965 if (error)
966 return (error);
967 }
968 sgl_nsegs = s->sg_ulptx->sg_nseg;
969 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
970 }
971
972 /* Any AAD comes after the IV. */
973 if (crp->crp_aad_length != 0) {
974 aad_start = iv_len + 1;
975 aad_stop = aad_start + crp->crp_aad_length - 1;
976 } else {
977 aad_start = 0;
978 aad_stop = 0;
979 }
980 cipher_start = iv_len + crp->crp_aad_length + 1;
981 if (op_type == CHCR_DECRYPT_OP)
982 cipher_stop = hash_size_in_response;
983 else
984 cipher_stop = 0;
985 if (op_type == CHCR_DECRYPT_OP)
986 auth_insert = hash_size_in_response;
987 else
988 auth_insert = 0;
989
990 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
991 sgl_len;
992 if (wr_len > SGE_MAX_WR_LEN)
993 return (EFBIG);
994 wr = alloc_wrqe(wr_len, s->port->txq);
995 if (wr == NULL) {
996 counter_u64_add(sc->stats_wr_nomem, 1);
997 return (ENOMEM);
998 }
999 crwr = wrtod(wr);
1000 memset(crwr, 0, wr_len);
1001
1002 crypto_read_iv(crp, iv);
1003
1004 /* Zero the remainder of the IV for AES-XTS. */
1005 memset(iv + s->cipher.iv_len, 0, iv_len - s->cipher.iv_len);
1006
1007 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len,
1008 op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);
1009
1010 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1011 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1012 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
1013 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1014 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1015 V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1016
1017 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1018
1019 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1020 V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1021 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1022 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1023 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
1024 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1025 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
1026 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1027 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1028 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1029
1030 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1031 hmac_ctrl = ccr_hmac_ctrl(axf->hashsize, hash_size_in_response);
1032 crwr->sec_cpl.seqno_numivs = htobe32(
1033 V_SCMD_SEQ_NO_CTRL(0) |
1034 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1035 V_SCMD_ENC_DEC_CTRL(op_type) |
1036 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1037 V_SCMD_CIPH_MODE(s->cipher.cipher_mode) |
1038 V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
1039 V_SCMD_HMAC_CTRL(hmac_ctrl) |
1040 V_SCMD_IV_SIZE(iv_len / 2) |
1041 V_SCMD_NUM_IVS(0));
1042 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1043 V_SCMD_IV_GEN_CTRL(0) |
1044 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1045 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1046
1047 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1048 switch (s->cipher.cipher_mode) {
1049 case SCMD_CIPH_MODE_AES_CBC:
1050 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1051 memcpy(crwr->key_ctx.key, s->cipher.enckey,
1052 s->cipher.key_len);
1053 else
1054 memcpy(crwr->key_ctx.key, s->cipher.deckey,
1055 s->cipher.key_len);
1056 break;
1057 case SCMD_CIPH_MODE_AES_CTR:
1058 memcpy(crwr->key_ctx.key, s->cipher.enckey,
1059 s->cipher.key_len);
1060 break;
1061 case SCMD_CIPH_MODE_AES_XTS:
1062 key_half = s->cipher.key_len / 2;
1063 memcpy(crwr->key_ctx.key, s->cipher.enckey + key_half,
1064 key_half);
1065 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1066 memcpy(crwr->key_ctx.key + key_half,
1067 s->cipher.enckey, key_half);
1068 else
1069 memcpy(crwr->key_ctx.key + key_half,
1070 s->cipher.deckey, key_half);
1071 break;
1072 }
1073
1074 dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
1075 memcpy(dst, s->hmac.pads, iopad_size * 2);
1076
1077 dst = (char *)(crwr + 1) + kctx_len;
1078 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
1079 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1080 memcpy(dst, iv, iv_len);
1081 dst += iv_len;
1082 if (imm_len != 0) {
1083 if (crp->crp_aad_length != 0) {
1084 if (crp->crp_aad != NULL)
1085 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1086 else
1087 crypto_copydata(crp, crp->crp_aad_start,
1088 crp->crp_aad_length, dst);
1089 dst += crp->crp_aad_length;
1090 }
1091 crypto_copydata(crp, crp->crp_payload_start,
1092 crp->crp_payload_length, dst);
1093 dst += crp->crp_payload_length;
1094 if (op_type == CHCR_DECRYPT_OP)
1095 crypto_copydata(crp, crp->crp_digest_start,
1096 hash_size_in_response, dst);
1097 } else
1098 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
1099
1100 /* XXX: TODO backpressure */
1101 t4_wrq_tx(sc->adapter, wr);
1102
1103 explicit_bzero(iv, sizeof(iv));
1104 return (0);
1105 }
1106
1107 static int
1108 ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
1109 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1110 {
1111
1112 /*
1113 * The updated IV to permit chained requests is at
1114 * cpl->data[2], but OCF doesn't permit chained requests.
1115 */
1116 return (error);
1117 }
1118
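/*
 * Handle an AES-GCM request.  Requests the engine can't handle (such
 * as an empty payload or oversized AAD) return EMSGSIZE so that they
 * can be dispatched to the software fallback session instead.
 */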
1119 static int
1120 ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
1121 {
1122 char iv[CHCR_MAX_CRYPTO_IV_LEN];
1123 struct chcr_wr *crwr;
1124 struct wrqe *wr;
1125 char *dst;
1126 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1127 u_int hash_size_in_response, imm_len;
1128 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1129 u_int hmac_ctrl, input_len;
1130 int dsgl_nsegs, dsgl_len;
1131 int sgl_nsegs, sgl_len;
1132 int error;
1133
1134 if (s->cipher.key_len == 0)
1135 return (EINVAL);
1136
1137 /*
1138 * The crypto engine doesn't handle GCM requests with an empty
1139 * payload, so handle those in software instead.
1140 */
1141 if (crp->crp_payload_length == 0)
1142 return (EMSGSIZE);
1143
1144 if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
1145 return (EMSGSIZE);
1146
1147 hash_size_in_response = s->gmac.hash_len;
1148 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1149 op_type = CHCR_ENCRYPT_OP;
1150 else
1151 op_type = CHCR_DECRYPT_OP;
1152
1153 iv_len = AES_BLOCK_LEN;
1154
1155 /*
1156 * GCM requests should always provide an explicit IV.
1157 */
1158 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
1159 return (EINVAL);
1160
1161 /*
1162 * The output buffer consists of the cipher text followed by
1163 * the tag when encrypting. For decryption it only contains
1164 * the plain text.
1165 *
1166 	 * Due to a firmware bug, the destination S/G list must begin
1167 	 * with a dummy buffer covering the IV and AAD, placed ahead of
1168 	 * the real output buffer.
1169 */
1170 if (op_type == CHCR_ENCRYPT_OP) {
1171 if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
1172 hash_size_in_response > MAX_REQUEST_SIZE)
1173 return (EFBIG);
1174 } else {
1175 if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
1176 MAX_REQUEST_SIZE)
1177 return (EFBIG);
1178 }
1179 sglist_reset(s->sg_dsgl);
1180 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1181 crp->crp_aad_length);
1182 if (error)
1183 return (error);
1184 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1185 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1186 crp->crp_payload_output_start, crp->crp_payload_length);
1187 else
1188 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1189 crp->crp_payload_start, crp->crp_payload_length);
1190 if (error)
1191 return (error);
1192 if (op_type == CHCR_ENCRYPT_OP) {
1193 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1194 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1195 crp->crp_digest_start, hash_size_in_response);
1196 else
1197 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1198 crp->crp_digest_start, hash_size_in_response);
1199 if (error)
1200 return (error);
1201 }
1202 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
1203 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1204 return (EFBIG);
1205 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1206
1207 /*
1208 * The 'key' part of the key context consists of the key followed
1209 * by the Galois hash key.
1210 */
1211 kctx_len = roundup2(s->cipher.key_len, 16) + GMAC_BLOCK_LEN;
1212 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1213
1214 /*
1215 * The input buffer consists of the IV, any AAD, and then the
1216 * cipher/plain text. For decryption requests the hash is
1217 * appended after the cipher text.
1218 *
1219 * The IV is always stored at the start of the input buffer
1220 * even though it may be duplicated in the payload. The
1221 * crypto engine doesn't work properly if the IV offset points
1222 * inside of the AAD region, so a second copy is always
1223 * required.
1224 */
1225 input_len = crp->crp_aad_length + crp->crp_payload_length;
1226 if (op_type == CHCR_DECRYPT_OP)
1227 input_len += hash_size_in_response;
1228 if (input_len > MAX_REQUEST_SIZE)
1229 return (EFBIG);
1230 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1231 imm_len = input_len;
1232 sgl_nsegs = 0;
1233 sgl_len = 0;
1234 } else {
1235 imm_len = 0;
1236 sglist_reset(s->sg_ulptx);
1237 if (crp->crp_aad_length != 0) {
1238 if (crp->crp_aad != NULL)
1239 error = sglist_append(s->sg_ulptx,
1240 crp->crp_aad, crp->crp_aad_length);
1241 else
1242 error = sglist_append_sglist(s->sg_ulptx,
1243 s->sg_input, crp->crp_aad_start,
1244 crp->crp_aad_length);
1245 if (error)
1246 return (error);
1247 }
1248 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1249 crp->crp_payload_start, crp->crp_payload_length);
1250 if (error)
1251 return (error);
1252 if (op_type == CHCR_DECRYPT_OP) {
1253 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1254 crp->crp_digest_start, hash_size_in_response);
1255 if (error)
1256 return (error);
1257 }
1258 sgl_nsegs = s->sg_ulptx->sg_nseg;
1259 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1260 }
1261
1262 if (crp->crp_aad_length != 0) {
1263 aad_start = iv_len + 1;
1264 aad_stop = aad_start + crp->crp_aad_length - 1;
1265 } else {
1266 aad_start = 0;
1267 aad_stop = 0;
1268 }
1269 cipher_start = iv_len + crp->crp_aad_length + 1;
1270 if (op_type == CHCR_DECRYPT_OP)
1271 cipher_stop = hash_size_in_response;
1272 else
1273 cipher_stop = 0;
1274 if (op_type == CHCR_DECRYPT_OP)
1275 auth_insert = hash_size_in_response;
1276 else
1277 auth_insert = 0;
1278
1279 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1280 sgl_len;
1281 if (wr_len > SGE_MAX_WR_LEN)
1282 return (EFBIG);
1283 wr = alloc_wrqe(wr_len, s->port->txq);
1284 if (wr == NULL) {
1285 counter_u64_add(sc->stats_wr_nomem, 1);
1286 return (ENOMEM);
1287 }
1288 crwr = wrtod(wr);
1289 memset(crwr, 0, wr_len);
1290
1291 crypto_read_iv(crp, iv);
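	/*
	 * With a 12-byte IV, the initial counter block (J0) is the IV
	 * followed by a big-endian 32-bit counter value of 1 (NIST SP
	 * 800-38D).
	 */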
1292 *(uint32_t *)&iv[12] = htobe32(1);
1293
1294 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1295 crp);
1296
1297 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1298 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1299 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
1300 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1301 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1302 V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1303
1304 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1305
1306 /*
1307 * NB: cipherstop is explicitly set to 0. On encrypt it
1308 * should normally be set to 0 anyway. However, for decrypt
1309 * the cipher ends before the tag in the ETA case (and
1310 * authstop is set to stop before the tag), but for GCM the
1311 * cipher still runs to the end of the buffer. Not sure if
1312 * this is intentional or a firmware quirk, but it is required
1313 * for working tag validation with GCM decryption.
1314 */
1315 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1316 V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1317 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1318 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1319 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1320 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1321 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1322 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1323 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1324 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1325
1326 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1327 hmac_ctrl = ccr_hmac_ctrl(AES_GMAC_HASH_LEN, hash_size_in_response);
1328 crwr->sec_cpl.seqno_numivs = htobe32(
1329 V_SCMD_SEQ_NO_CTRL(0) |
1330 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1331 V_SCMD_ENC_DEC_CTRL(op_type) |
1332 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 1 : 0) |
1333 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_GCM) |
1334 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_GHASH) |
1335 V_SCMD_HMAC_CTRL(hmac_ctrl) |
1336 V_SCMD_IV_SIZE(iv_len / 2) |
1337 V_SCMD_NUM_IVS(0));
1338 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1339 V_SCMD_IV_GEN_CTRL(0) |
1340 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1341 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1342
1343 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1344 memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
1345 dst = crwr->key_ctx.key + roundup2(s->cipher.key_len, 16);
1346 memcpy(dst, s->gmac.ghash_h, GMAC_BLOCK_LEN);
1347
1348 dst = (char *)(crwr + 1) + kctx_len;
1349 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
1350 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1351 memcpy(dst, iv, iv_len);
1352 dst += iv_len;
1353 if (imm_len != 0) {
1354 if (crp->crp_aad_length != 0) {
1355 if (crp->crp_aad != NULL)
1356 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1357 else
1358 crypto_copydata(crp, crp->crp_aad_start,
1359 crp->crp_aad_length, dst);
1360 dst += crp->crp_aad_length;
1361 }
1362 crypto_copydata(crp, crp->crp_payload_start,
1363 crp->crp_payload_length, dst);
1364 dst += crp->crp_payload_length;
1365 if (op_type == CHCR_DECRYPT_OP)
1366 crypto_copydata(crp, crp->crp_digest_start,
1367 hash_size_in_response, dst);
1368 } else
1369 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
1370
1371 /* XXX: TODO backpressure */
1372 t4_wrq_tx(sc->adapter, wr);
1373
1374 explicit_bzero(iv, sizeof(iv));
1375 return (0);
1376 }
1377
1378 static int
1379 ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s,
1380 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1381 {
1382
1383 /*
1384 * The updated IV to permit chained requests is at
1385 * cpl->data[2], but OCF doesn't permit chained requests.
1386 *
1387 * Note that the hardware should always verify the GMAC hash.
1388 */
1389 return (error);
1390 }
1391
1392 static int
1393 ccr_ccm_hmac_ctrl(unsigned int authsize)
1394 {
1395 switch (authsize) {
1396 case 4:
1397 return (SCMD_HMAC_CTRL_PL1);
1398 case 6:
1399 return (SCMD_HMAC_CTRL_PL2);
1400 case 8:
1401 return (SCMD_HMAC_CTRL_DIV2);
1402 case 10:
1403 return (SCMD_HMAC_CTRL_TRUNC_RFC4366);
1404 case 12:
1405 return (SCMD_HMAC_CTRL_IPSEC_96BIT);
1406 case 14:
1407 return (SCMD_HMAC_CTRL_PL3);
1408 case 16:
1409 return (SCMD_HMAC_CTRL_NO_TRUNC);
1410 default:
1411 __assert_unreachable();
1412 }
1413 }
1414
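/*
 * Construct the CCM B0 block (RFC 3610).  The flags byte encodes
 * whether AAD is present (bit 6), the encoded tag length ((M - 2) / 2)
 * in bits 3 - 5, and L - 1 in bits 0 - 2; the rest of the block holds
 * the nonce followed by the message length as a big-endian value.
 */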
1415 static void
1416 generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
1417 const char *iv, char *b0)
1418 {
1419 u_int i, payload_len, L;
1420
1421 	/* NB: The L - 1 value is already set in the first byte of the IV. */
1422 memcpy(b0, iv, CCM_B0_SIZE);
1423 L = iv[0] + 1;
1424
1425 	/* Set the encoded tag length, (M - 2) / 2, in bits 3 - 5. */
1426 b0[0] |= (((hash_size_in_response - 2) / 2) << 3);
1427
1428 /* Store the payload length as a big-endian value. */
1429 payload_len = crp->crp_payload_length;
1430 for (i = 0; i < L; i++) {
1431 b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
1432 payload_len >>= 8;
1433 }
1434
1435 /*
1436 * If there is AAD in the request, set bit 6 in the flags
1437 * field and store the AAD length as a big-endian value at the
1438 	 * start of block 1.  Only a 16-bit AAD length field is used
1439 	 * since the T6 doesn't support larger AAD sizes.
1440 */
1441 if (crp->crp_aad_length != 0) {
1442 b0[0] |= (1 << 6);
1443 *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
1444 }
1445 }
1446
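/*
 * Handle an AES-CCM request.  Block 0 and the encoded AAD length field
 * are constructed by the driver and passed to the engine as part of
 * the AAD.  Requests the engine can't handle return EMSGSIZE so that
 * they can be dispatched to the software fallback session instead.
 */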
1447 static int
1448 ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
1449 {
1450 char iv[CHCR_MAX_CRYPTO_IV_LEN];
1451 const struct crypto_session_params *csp;
1452 struct ulptx_idata *idata;
1453 struct chcr_wr *crwr;
1454 struct wrqe *wr;
1455 char *dst;
1456 u_int iv_len, kctx_len, op_type, transhdr_len, wr_len;
1457 u_int aad_len, b0_len, hash_size_in_response, imm_len;
1458 u_int aad_start, aad_stop, cipher_start, cipher_stop, auth_insert;
1459 u_int hmac_ctrl, input_len;
1460 int dsgl_nsegs, dsgl_len;
1461 int sgl_nsegs, sgl_len;
1462 int error;
1463
1464 csp = crypto_get_params(crp->crp_session);
1465
1466 if (s->cipher.key_len == 0)
1467 return (EINVAL);
1468
1469 /*
1470 * The crypto engine doesn't handle CCM requests with an empty
1471 * payload, so handle those in software instead.
1472 */
1473 if (crp->crp_payload_length == 0)
1474 return (EMSGSIZE);
1475
1476 /* The length has to fit within the length field in block 0. */
1477 if (crp->crp_payload_length > ccm_max_payload_length(csp))
1478 return (EMSGSIZE);
1479
1480 /*
1481 * CCM always includes block 0 in the AAD before AAD from the
1482 * request.
1483 */
1484 b0_len = CCM_B0_SIZE;
1485 if (crp->crp_aad_length != 0)
1486 b0_len += CCM_AAD_FIELD_SIZE;
1487 aad_len = b0_len + crp->crp_aad_length;
1488
1489 /*
1490 * CCM requests should always provide an explicit IV (really
1491 * the nonce).
1492 */
1493 if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
1494 return (EINVAL);
1495
1496 /*
1497 * The IV in the work request is 16 bytes and not just the
1498 * nonce.
1499 */
1500 iv_len = AES_BLOCK_LEN;
1501
1502 if (iv_len + aad_len > MAX_AAD_LEN)
1503 return (EMSGSIZE);
1504
1505 hash_size_in_response = s->ccm_mac.hash_len;
1506 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
1507 op_type = CHCR_ENCRYPT_OP;
1508 else
1509 op_type = CHCR_DECRYPT_OP;
1510
1511 /*
1512 * The output buffer consists of the cipher text followed by
1513 * the tag when encrypting. For decryption it only contains
1514 * the plain text.
1515 *
1516 	 * Due to a firmware bug, the destination S/G list must begin
1517 	 * with a dummy buffer covering the IV and AAD, placed ahead of
1518 	 * the real output buffer.
1519 */
1520 if (op_type == CHCR_ENCRYPT_OP) {
1521 if (iv_len + aad_len + crp->crp_payload_length +
1522 hash_size_in_response > MAX_REQUEST_SIZE)
1523 return (EFBIG);
1524 } else {
1525 if (iv_len + aad_len + crp->crp_payload_length >
1526 MAX_REQUEST_SIZE)
1527 return (EFBIG);
1528 }
1529 sglist_reset(s->sg_dsgl);
1530 error = sglist_append_sglist(s->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
1531 aad_len);
1532 if (error)
1533 return (error);
1534 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1535 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1536 crp->crp_payload_output_start, crp->crp_payload_length);
1537 else
1538 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1539 crp->crp_payload_start, crp->crp_payload_length);
1540 if (error)
1541 return (error);
1542 if (op_type == CHCR_ENCRYPT_OP) {
1543 if (CRYPTO_HAS_OUTPUT_BUFFER(crp))
1544 error = sglist_append_sglist(s->sg_dsgl, s->sg_output,
1545 crp->crp_digest_start, hash_size_in_response);
1546 else
1547 error = sglist_append_sglist(s->sg_dsgl, s->sg_input,
1548 crp->crp_digest_start, hash_size_in_response);
1549 if (error)
1550 return (error);
1551 }
1552 dsgl_nsegs = ccr_count_sgl(s->sg_dsgl, DSGL_SGE_MAXLEN);
1553 if (dsgl_nsegs > MAX_RX_PHYS_DSGL_SGE)
1554 return (EFBIG);
1555 dsgl_len = ccr_phys_dsgl_len(dsgl_nsegs);
1556
1557 /*
1558 * The 'key' part of the key context consists of two copies of
1559 * the AES key.
1560 */
1561 kctx_len = roundup2(s->cipher.key_len, 16) * 2;
1562 transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
1563
1564 /*
1565 * The input buffer consists of the IV, AAD (including block
1566 * 0), and then the cipher/plain text. For decryption
1567 * requests the hash is appended after the cipher text.
1568 *
1569 * The IV is always stored at the start of the input buffer
1570 * even though it may be duplicated in the payload. The
1571 * crypto engine doesn't work properly if the IV offset points
1572 * inside of the AAD region, so a second copy is always
1573 * required.
1574 */
1575 input_len = aad_len + crp->crp_payload_length;
1576 if (op_type == CHCR_DECRYPT_OP)
1577 input_len += hash_size_in_response;
1578 if (input_len > MAX_REQUEST_SIZE)
1579 return (EFBIG);
1580 if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
1581 imm_len = input_len;
1582 sgl_nsegs = 0;
1583 sgl_len = 0;
1584 } else {
1585 /* Block 0 is passed as immediate data. */
1586 imm_len = b0_len;
1587
1588 sglist_reset(s->sg_ulptx);
1589 if (crp->crp_aad_length != 0) {
1590 if (crp->crp_aad != NULL)
1591 error = sglist_append(s->sg_ulptx,
1592 crp->crp_aad, crp->crp_aad_length);
1593 else
1594 error = sglist_append_sglist(s->sg_ulptx,
1595 s->sg_input, crp->crp_aad_start,
1596 crp->crp_aad_length);
1597 if (error)
1598 return (error);
1599 }
1600 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1601 crp->crp_payload_start, crp->crp_payload_length);
1602 if (error)
1603 return (error);
1604 if (op_type == CHCR_DECRYPT_OP) {
1605 error = sglist_append_sglist(s->sg_ulptx, s->sg_input,
1606 crp->crp_digest_start, hash_size_in_response);
1607 if (error)
1608 return (error);
1609 }
1610 sgl_nsegs = s->sg_ulptx->sg_nseg;
1611 sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
1612 }
1613
1614 aad_start = iv_len + 1;
1615 aad_stop = aad_start + aad_len - 1;
1616 cipher_start = aad_stop + 1;
1617 if (op_type == CHCR_DECRYPT_OP)
1618 cipher_stop = hash_size_in_response;
1619 else
1620 cipher_stop = 0;
1621 if (op_type == CHCR_DECRYPT_OP)
1622 auth_insert = hash_size_in_response;
1623 else
1624 auth_insert = 0;
1625
1626 wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
1627 sgl_len;
1628 if (wr_len > SGE_MAX_WR_LEN)
1629 return (EFBIG);
1630 wr = alloc_wrqe(wr_len, s->port->txq);
1631 if (wr == NULL) {
1632 counter_u64_add(sc->stats_wr_nomem, 1);
1633 return (ENOMEM);
1634 }
1635 crwr = wrtod(wr);
1636 memset(crwr, 0, wr_len);
1637
1638 /*
1639 * Read the nonce from the request. Use the nonce to generate
1640 * the full IV with the counter set to 0.
1641 */
1642 memset(iv, 0, iv_len);
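	/*
	 * The flags byte of the counter blocks holds L - 1, where
	 * L = 15 - (nonce length).
	 */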
1643 iv[0] = (15 - csp->csp_ivlen) - 1;
1644 crypto_read_iv(crp, iv + 1);
1645
1646 ccr_populate_wreq(sc, s, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
1647 crp);
1648
1649 crwr->sec_cpl.op_ivinsrtofst = htobe32(
1650 V_CPL_TX_SEC_PDU_OPCODE(CPL_TX_SEC_PDU) |
1651 V_CPL_TX_SEC_PDU_RXCHID(s->port->rx_channel_id) |
1652 V_CPL_TX_SEC_PDU_ACKFOLLOWS(0) | V_CPL_TX_SEC_PDU_ULPTXLPBK(1) |
1653 V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
1654 V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
1655
1656 crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
1657
1658 /*
1659 * NB: cipherstop is explicitly set to 0. See comments above
1660 * in ccr_gcm().
1661 */
1662 crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
1663 V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
1664 V_CPL_TX_SEC_PDU_AADSTOP(aad_stop) |
1665 V_CPL_TX_SEC_PDU_CIPHERSTART(cipher_start) |
1666 V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
1667 crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
1668 V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0) |
1669 V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
1670 V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
1671 V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
1672
1673 /* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
1674 hmac_ctrl = ccr_ccm_hmac_ctrl(hash_size_in_response);
1675 crwr->sec_cpl.seqno_numivs = htobe32(
1676 V_SCMD_SEQ_NO_CTRL(0) |
1677 V_SCMD_PROTO_VERSION(SCMD_PROTO_VERSION_GENERIC) |
1678 V_SCMD_ENC_DEC_CTRL(op_type) |
1679 V_SCMD_CIPH_AUTH_SEQ_CTRL(op_type == CHCR_ENCRYPT_OP ? 0 : 1) |
1680 V_SCMD_CIPH_MODE(SCMD_CIPH_MODE_AES_CCM) |
1681 V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_CBCMAC) |
1682 V_SCMD_HMAC_CTRL(hmac_ctrl) |
1683 V_SCMD_IV_SIZE(iv_len / 2) |
1684 V_SCMD_NUM_IVS(0));
1685 crwr->sec_cpl.ivgen_hdrlen = htobe32(
1686 V_SCMD_IV_GEN_CTRL(0) |
1687 V_SCMD_MORE_FRAGS(0) | V_SCMD_LAST_FRAG(0) | V_SCMD_MAC_ONLY(0) |
1688 V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
1689
1690 crwr->key_ctx.ctx_hdr = s->cipher.key_ctx_hdr;
1691 memcpy(crwr->key_ctx.key, s->cipher.enckey, s->cipher.key_len);
1692 memcpy(crwr->key_ctx.key + roundup(s->cipher.key_len, 16),
1693 s->cipher.enckey, s->cipher.key_len);
1694
1695 dst = (char *)(crwr + 1) + kctx_len;
1696 ccr_write_phys_dsgl(s, dst, dsgl_nsegs);
1697 dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
1698 memcpy(dst, iv, iv_len);
1699 dst += iv_len;
1700 generate_ccm_b0(crp, hash_size_in_response, iv, dst);
1701 if (sgl_nsegs == 0) {
1702 dst += b0_len;
1703 if (crp->crp_aad_length != 0) {
1704 if (crp->crp_aad != NULL)
1705 memcpy(dst, crp->crp_aad, crp->crp_aad_length);
1706 else
1707 crypto_copydata(crp, crp->crp_aad_start,
1708 crp->crp_aad_length, dst);
1709 dst += crp->crp_aad_length;
1710 }
1711 crypto_copydata(crp, crp->crp_payload_start,
1712 crp->crp_payload_length, dst);
1713 dst += crp->crp_payload_length;
1714 if (op_type == CHCR_DECRYPT_OP)
1715 crypto_copydata(crp, crp->crp_digest_start,
1716 hash_size_in_response, dst);
1717 } else {
1718 dst += CCM_B0_SIZE;
1719 if (b0_len > CCM_B0_SIZE) {
1720 /*
1721 * If there is AAD, insert padding including a
1722 * ULP_TX_SC_NOOP so that the ULP_TX_SC_DSGL
1723 * is 16-byte aligned.
1724 */
1725 KASSERT(b0_len - CCM_B0_SIZE == CCM_AAD_FIELD_SIZE,
1726 ("b0_len mismatch"));
1727 memset(dst + CCM_AAD_FIELD_SIZE, 0,
1728 8 - CCM_AAD_FIELD_SIZE);
1729 idata = (void *)(dst + 8);
1730 idata->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
1731 idata->len = htobe32(0);
1732 dst = (void *)(idata + 1);
1733 }
1734 ccr_write_ulptx_sgl(s, dst, sgl_nsegs);
1735 }
1736
1737 /* XXX: TODO backpressure */
1738 t4_wrq_tx(sc->adapter, wr);
1739
1740 explicit_bzero(iv, sizeof(iv));
1741 return (0);
1742 }
1743
1744 static int
1745 ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s,
1746 struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
1747 {
1748
1749 /*
1750 * The updated IV to permit chained requests is at
1751 * cpl->data[2], but OCF doesn't permit chained requests.
1752 *
1753 * Note that the hardware should always verify the CBC MAC
1754 * hash.
1755 */
1756 return (error);
1757 }
1758
1759 /*
1760 * Use the software session for requests not supported by the crypto
1761 * engine (e.g. CCM and GCM requests with an empty payload).
1762 */
1763 static int
1764 ccr_soft_done(struct cryptop *crp)
1765 {
1766 struct cryptop *orig;
1767
1768 orig = crp->crp_opaque;
1769 orig->crp_etype = crp->crp_etype;
1770 crypto_freereq(crp);
1771 crypto_done(orig);
1772 return (0);
1773 }
1774
1775 static void
1776 ccr_soft(struct ccr_session *s, struct cryptop *crp)
1777 {
1778 struct cryptop *new;
1779 int error;
1780
1781 new = crypto_clonereq(crp, s->sw_session, M_NOWAIT);
1782 if (new == NULL) {
1783 crp->crp_etype = ENOMEM;
1784 crypto_done(crp);
1785 return;
1786 }
1787
1788 /*
1789 * XXX: This only really needs CRYPTO_ASYNC_ORDERED if the
1790 * original request was dispatched that way. There is no way
1791 * to know that though since crypto_dispatch_async() discards
1792 * the flag for async backends (such as ccr(4)).
1793 */
1794 new->crp_opaque = crp;
1795 new->crp_callback = ccr_soft_done;
1796 error = crypto_dispatch_async(new, CRYPTO_ASYNC_ORDERED);
1797 if (error != 0) {
1798 crp->crp_etype = error;
1799 crypto_done(crp);
1800 }
1801 }
1802
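/*
 * Add a "ccr" child to each adapter that advertises the crypto
 * lookaside capability so that this driver attaches to it.
 */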
1803 static void
1804 ccr_identify(driver_t *driver, device_t parent)
1805 {
1806 struct adapter *sc;
1807
1808 sc = device_get_softc(parent);
1809 if (sc->cryptocaps & FW_CAPS_CONFIG_CRYPTO_LOOKASIDE &&
1810 device_find_child(parent, "ccr", -1) == NULL)
1811 device_add_child(parent, "ccr", -1);
1812 }
1813
1814 static int
1815 ccr_probe(device_t dev)
1816 {
1817
1818 device_set_desc(dev, "Chelsio Crypto Accelerator");
1819 return (BUS_PROBE_DEFAULT);
1820 }
1821
1822 static void
1823 ccr_sysctls(struct ccr_softc *sc)
1824 {
1825 struct sysctl_ctx_list *ctx = &sc->ctx;
1826 struct sysctl_oid *oid, *port_oid;
1827 struct sysctl_oid_list *children;
1828 char buf[16];
1829 int i;
1830
1831 /*
1832 * dev.ccr.X.
1833 */
1834 oid = device_get_sysctl_tree(sc->dev);
1835 children = SYSCTL_CHILDREN(oid);
1836
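	/*
	 * port_mask is writable, so an administrator can enable or
	 * disable individual ports at runtime, e.g.
	 * "sysctl dev.ccr.0.port_mask=3" to allow both ports.
	 */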
1837 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "port_mask", CTLFLAG_RW,
1838 &sc->port_mask, 0, "Mask of enabled ports");
1839
1840 /*
1841 * dev.ccr.X.stats.
1842 */
1843 oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "stats",
1844 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "statistics");
1845 children = SYSCTL_CHILDREN(oid);
1846
1847 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hash", CTLFLAG_RD,
1848 &sc->stats_hash, "Hash requests submitted");
1849 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "hmac", CTLFLAG_RD,
1850 &sc->stats_hmac, "HMAC requests submitted");
1851 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_encrypt",
1852 CTLFLAG_RD, &sc->stats_cipher_encrypt,
1853 "Cipher encryption requests submitted");
1854 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "cipher_decrypt",
1855 CTLFLAG_RD, &sc->stats_cipher_decrypt,
1856 "Cipher decryption requests submitted");
1857 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_encrypt",
1858 CTLFLAG_RD, &sc->stats_eta_encrypt,
1859 "Combined AES+HMAC encryption requests submitted");
1860 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "eta_decrypt",
1861 CTLFLAG_RD, &sc->stats_eta_decrypt,
1862 "Combined AES+HMAC decryption requests submitted");
1863 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_encrypt",
1864 CTLFLAG_RD, &sc->stats_gcm_encrypt,
1865 "AES-GCM encryption requests submitted");
1866 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "gcm_decrypt",
1867 CTLFLAG_RD, &sc->stats_gcm_decrypt,
1868 "AES-GCM decryption requests submitted");
1869 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_encrypt",
1870 CTLFLAG_RD, &sc->stats_ccm_encrypt,
1871 "AES-CCM encryption requests submitted");
1872 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "ccm_decrypt",
1873 CTLFLAG_RD, &sc->stats_ccm_decrypt,
1874 "AES-CCM decryption requests submitted");
1875 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "wr_nomem", CTLFLAG_RD,
1876 &sc->stats_wr_nomem, "Work request memory allocation failures");
1877 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "inflight", CTLFLAG_RD,
1878 &sc->stats_inflight, "Requests currently pending");
1879 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "mac_error", CTLFLAG_RD,
1880 &sc->stats_mac_error, "MAC errors");
1881 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "pad_error", CTLFLAG_RD,
1882 &sc->stats_pad_error, "Padding errors");
1883 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sglist_error",
1884 CTLFLAG_RD, &sc->stats_sglist_error,
1885 "Requests for which DMA mapping failed");
1886 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "process_error",
1887 CTLFLAG_RD, &sc->stats_process_error,
1888 "Requests failed during queueing");
1889 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "sw_fallback",
1890 CTLFLAG_RD, &sc->stats_sw_fallback,
1891 "Requests processed by falling back to software");
1892
1893 /*
1894 * dev.ccr.X.stats.port
1895 */
1896 port_oid = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "port",
1897 CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "Per-port statistics");
1898
1899 for (i = 0; i < nitems(sc->ports); i++) {
1900 if (sc->ports[i].rxq == NULL)
1901 continue;
1902
1903 /*
1904 * dev.ccr.X.stats.port.Y
1905 */
1906 snprintf(buf, sizeof(buf), "%d", i);
1907 oid = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(port_oid), OID_AUTO,
1908 buf, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, buf);
1909 children = SYSCTL_CHILDREN(oid);
1910
1911 SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "active_sessions",
1912 CTLFLAG_RD, &sc->ports[i].active_sessions, 0,
1913 "Count of active sessions");
1914 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "queued",
1915 CTLFLAG_RD, &sc->ports[i].stats_queued, "Requests queued");
1916 SYSCTL_ADD_COUNTER_U64(ctx, children, OID_AUTO, "completed",
1917 CTLFLAG_RD, &sc->ports[i].stats_completed,
1918 "Requests completed");
1919 }
1920 }
1921
1922 static void
1923 ccr_init_port(struct ccr_softc *sc, int port)
1924 {
1925 struct port_info *pi;
1926
1927 pi = sc->adapter->port[port];
1928 sc->ports[port].txq = &sc->adapter->sge.ctrlq[port];
1929 sc->ports[port].rxq = &sc->adapter->sge.rxq[pi->vi->first_rxq];
1930 sc->ports[port].rx_channel_id = pi->rx_c_chan;
1931 sc->ports[port].tx_channel_id = pi->tx_chan;
1932 sc->ports[port].stats_queued = counter_u64_alloc(M_WAITOK);
1933 sc->ports[port].stats_completed = counter_u64_alloc(M_WAITOK);
1934 _Static_assert(sizeof(sc->port_mask) * NBBY >= MAX_NPORTS - 1,
1935 "Too many ports to fit in port_mask");
1936
1937 /*
1938 * Completions for crypto requests on port 1 can sometimes
1939 * return a stale cookie value due to a firmware bug. Disable
1940 * requests on port 1 by default on affected firmware.
1941 */
1942 if (sc->adapter->params.fw_vers >= FW_VERSION32(1, 25, 4, 0) ||
1943 port == 0)
1944 sc->port_mask |= 1u << port;
1945 }
1946
1947 static int
1948 ccr_attach(device_t dev)
1949 {
1950 struct ccr_softc *sc;
1951 int32_t cid;
1952 int i;
1953
1954 sc = device_get_softc(dev);
1955 sc->dev = dev;
1956 sysctl_ctx_init(&sc->ctx);
1957 sc->adapter = device_get_softc(device_get_parent(dev));
1958 for_each_port(sc->adapter, i) {
1959 ccr_init_port(sc, i);
1960 }
1961 cid = crypto_get_driverid(dev, sizeof(struct ccr_session),
1962 CRYPTOCAP_F_HARDWARE);
1963 if (cid < 0) {
1964 device_printf(dev, "could not get crypto driver id\n");
1965 return (ENXIO);
1966 }
1967 sc->cid = cid;
1968
1969 /*
1970 * The FID must be the first RXQ for port 0 regardless of
1971 * which port is used to service the request.
1972 */
1973 sc->first_rxq_id = sc->adapter->sge.rxq[0].iq.abs_id;
1974
1975 mtx_init(&sc->lock, "ccr", NULL, MTX_DEF);
1976 sc->iv_aad_buf = malloc(MAX_AAD_LEN, M_CCR, M_WAITOK);
1977 sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
1978 sc->stats_cipher_encrypt = counter_u64_alloc(M_WAITOK);
1979 sc->stats_cipher_decrypt = counter_u64_alloc(M_WAITOK);
1980 sc->stats_hash = counter_u64_alloc(M_WAITOK);
1981 sc->stats_hmac = counter_u64_alloc(M_WAITOK);
1982 sc->stats_eta_encrypt = counter_u64_alloc(M_WAITOK);
1983 sc->stats_eta_decrypt = counter_u64_alloc(M_WAITOK);
1984 sc->stats_gcm_encrypt = counter_u64_alloc(M_WAITOK);
1985 sc->stats_gcm_decrypt = counter_u64_alloc(M_WAITOK);
1986 sc->stats_ccm_encrypt = counter_u64_alloc(M_WAITOK);
1987 sc->stats_ccm_decrypt = counter_u64_alloc(M_WAITOK);
1988 sc->stats_wr_nomem = counter_u64_alloc(M_WAITOK);
1989 sc->stats_inflight = counter_u64_alloc(M_WAITOK);
1990 sc->stats_mac_error = counter_u64_alloc(M_WAITOK);
1991 sc->stats_pad_error = counter_u64_alloc(M_WAITOK);
1992 sc->stats_sglist_error = counter_u64_alloc(M_WAITOK);
1993 sc->stats_process_error = counter_u64_alloc(M_WAITOK);
1994 sc->stats_sw_fallback = counter_u64_alloc(M_WAITOK);
1995 ccr_sysctls(sc);
1996
1997 return (0);
1998 }
1999
2000 static void
2001 ccr_free_port(struct ccr_softc *sc, int port)
2002 {
2003
2004 counter_u64_free(sc->ports[port].stats_queued);
2005 counter_u64_free(sc->ports[port].stats_completed);
2006 }
2007
2008 static int
2009 ccr_detach(device_t dev)
2010 {
2011 struct ccr_softc *sc;
2012 int i;
2013
2014 sc = device_get_softc(dev);
2015
2016 mtx_lock(&sc->lock);
2017 sc->detaching = true;
2018 mtx_unlock(&sc->lock);
2019
2020 crypto_unregister_all(sc->cid);
2021
2022 sysctl_ctx_free(&sc->ctx);
2023 mtx_destroy(&sc->lock);
2024 counter_u64_free(sc->stats_cipher_encrypt);
2025 counter_u64_free(sc->stats_cipher_decrypt);
2026 counter_u64_free(sc->stats_hash);
2027 counter_u64_free(sc->stats_hmac);
2028 counter_u64_free(sc->stats_eta_encrypt);
2029 counter_u64_free(sc->stats_eta_decrypt);
2030 counter_u64_free(sc->stats_gcm_encrypt);
2031 counter_u64_free(sc->stats_gcm_decrypt);
2032 counter_u64_free(sc->stats_ccm_encrypt);
2033 counter_u64_free(sc->stats_ccm_decrypt);
2034 counter_u64_free(sc->stats_wr_nomem);
2035 counter_u64_free(sc->stats_inflight);
2036 counter_u64_free(sc->stats_mac_error);
2037 counter_u64_free(sc->stats_pad_error);
2038 counter_u64_free(sc->stats_sglist_error);
2039 counter_u64_free(sc->stats_process_error);
2040 counter_u64_free(sc->stats_sw_fallback);
2041 for_each_port(sc->adapter, i) {
2042 ccr_free_port(sc, i);
2043 }
2044 sglist_free(sc->sg_iv_aad);
2045 free(sc->iv_aad_buf, M_CCR);
2046 return (0);
2047 }
2048
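/*
 * Seed the partial hash state for plain (unkeyed) hash sessions.
 * HMAC sessions instead derive IPAD/OPAD state via
 * t4_init_hmac_digest().
 */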
2049 static void
2050 ccr_init_hash_digest(struct ccr_session *s)
2051 {
2052 union authctx auth_ctx;
2053 const struct auth_hash *axf;
2054
2055 axf = s->hmac.auth_hash;
2056 axf->Init(&auth_ctx);
2057 t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads);
2058 }
2059
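/*
 * Validate AES key lengths: plain AES accepts 128/192/256-bit keys,
 * while AES-XTS requires a combined key of 256 or 512 bits (two
 * 128- or 256-bit keys); 384-bit XTS keys are not supported.
 */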
2060 static bool
2061 ccr_aes_check_keylen(int alg, int klen)
2062 {
2063
2064 switch (klen * 8) {
2065 case 128:
2066 case 192:
2067 if (alg == CRYPTO_AES_XTS)
2068 return (false);
2069 break;
2070 case 256:
2071 break;
2072 case 512:
2073 if (alg != CRYPTO_AES_XTS)
2074 return (false);
2075 break;
2076 default:
2077 return (false);
2078 }
2079 return (true);
2080 }
2081
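/*
 * Program the session's AES key material: store the encryption key
 * (and, for CBC/XTS, the derived decryption key) and size the key
 * context header according to the session mode.
 */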
2082 static void
2083 ccr_aes_setkey(struct ccr_session *s, const void *key, int klen)
2084 {
2085 unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
2086 unsigned int opad_present;
2087
2088 if (s->cipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
2089 kbits = (klen / 2) * 8;
2090 else
2091 kbits = klen * 8;
2092 switch (kbits) {
2093 case 128:
2094 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
2095 break;
2096 case 192:
2097 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_192;
2098 break;
2099 case 256:
2100 ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_256;
2101 break;
2102 default:
2103 panic("should not get here");
2104 }
2105
2106 s->cipher.key_len = klen;
2107 memcpy(s->cipher.enckey, key, s->cipher.key_len);
2108 switch (s->cipher.cipher_mode) {
2109 case SCMD_CIPH_MODE_AES_CBC:
2110 case SCMD_CIPH_MODE_AES_XTS:
2111 t4_aes_getdeckey(s->cipher.deckey, key, kbits);
2112 break;
2113 }
2114
2115 kctx_len = roundup2(s->cipher.key_len, 16);
2116 switch (s->mode) {
2117 case ETA:
2118 mk_size = s->hmac.mk_size;
2119 opad_present = 1;
2120 iopad_size = roundup2(s->hmac.partial_digest_len, 16);
2121 kctx_len += iopad_size * 2;
2122 break;
2123 case GCM:
2124 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2125 opad_present = 0;
2126 kctx_len += GMAC_BLOCK_LEN;
2127 break;
2128 case CCM:
2129 switch (kbits) {
2130 case 128:
2131 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
2132 break;
2133 case 192:
2134 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_192;
2135 break;
2136 case 256:
2137 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2138 break;
2139 default:
2140 panic("should not get here");
2141 }
2142 opad_present = 0;
2143 kctx_len *= 2;
2144 break;
2145 default:
2146 mk_size = CHCR_KEYCTX_NO_KEY;
2147 opad_present = 0;
2148 break;
2149 }
2150 kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
2151 s->cipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
2152 V_KEY_CONTEXT_DUAL_CK(s->cipher.cipher_mode ==
2153 SCMD_CIPH_MODE_AES_XTS) |
2154 V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
2155 V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
2156 V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
2157 }
2158
2159 static bool
2160 ccr_auth_supported(const struct crypto_session_params *csp)
2161 {
2162
2163 switch (csp->csp_auth_alg) {
2164 case CRYPTO_SHA1:
2165 case CRYPTO_SHA2_224:
2166 case CRYPTO_SHA2_256:
2167 case CRYPTO_SHA2_384:
2168 case CRYPTO_SHA2_512:
2169 case CRYPTO_SHA1_HMAC:
2170 case CRYPTO_SHA2_224_HMAC:
2171 case CRYPTO_SHA2_256_HMAC:
2172 case CRYPTO_SHA2_384_HMAC:
2173 case CRYPTO_SHA2_512_HMAC:
2174 break;
2175 default:
2176 return (false);
2177 }
2178 return (true);
2179 }
2180
2181 static bool
2182 ccr_cipher_supported(const struct crypto_session_params *csp)
2183 {
2184
2185 switch (csp->csp_cipher_alg) {
2186 case CRYPTO_AES_CBC:
2187 if (csp->csp_ivlen != AES_BLOCK_LEN)
2188 return (false);
2189 break;
2190 case CRYPTO_AES_ICM:
2191 if (csp->csp_ivlen != AES_BLOCK_LEN)
2192 return (false);
2193 break;
2194 case CRYPTO_AES_XTS:
2195 if (csp->csp_ivlen != AES_XTS_IV_LEN)
2196 return (false);
2197 break;
2198 default:
2199 return (false);
2200 }
2201 return (ccr_aes_check_keylen(csp->csp_cipher_alg,
2202 csp->csp_cipher_klen));
2203 }
2204
2205 static int
2206 ccr_cipher_mode(const struct crypto_session_params *csp)
2207 {
2208
2209 switch (csp->csp_cipher_alg) {
2210 case CRYPTO_AES_CBC:
2211 return (SCMD_CIPH_MODE_AES_CBC);
2212 case CRYPTO_AES_ICM:
2213 return (SCMD_CIPH_MODE_AES_CTR);
2214 case CRYPTO_AES_NIST_GCM_16:
2215 return (SCMD_CIPH_MODE_AES_GCM);
2216 case CRYPTO_AES_XTS:
2217 return (SCMD_CIPH_MODE_AES_XTS);
2218 case CRYPTO_AES_CCM_16:
2219 return (SCMD_CIPH_MODE_AES_CCM);
2220 default:
2221 return (SCMD_CIPH_MODE_NOP);
2222 }
2223 }
2224
2225 static int
2226 ccr_probesession(device_t dev, const struct crypto_session_params *csp)
2227 {
2228 unsigned int cipher_mode;
2229
2230 if ((csp->csp_flags & ~(CSP_F_SEPARATE_OUTPUT | CSP_F_SEPARATE_AAD)) !=
2231 0)
2232 return (EINVAL);
2233 switch (csp->csp_mode) {
2234 case CSP_MODE_DIGEST:
2235 if (!ccr_auth_supported(csp))
2236 return (EINVAL);
2237 break;
2238 case CSP_MODE_CIPHER:
2239 if (!ccr_cipher_supported(csp))
2240 return (EINVAL);
2241 break;
2242 case CSP_MODE_AEAD:
2243 switch (csp->csp_cipher_alg) {
2244 case CRYPTO_AES_NIST_GCM_16:
2245 case CRYPTO_AES_CCM_16:
2246 break;
2247 default:
2248 return (EINVAL);
2249 }
2250 break;
2251 case CSP_MODE_ETA:
2252 if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp))
2253 return (EINVAL);
2254 break;
2255 default:
2256 return (EINVAL);
2257 }
2258
2259 if (csp->csp_cipher_klen != 0) {
2260 cipher_mode = ccr_cipher_mode(csp);
2261 if (cipher_mode == SCMD_CIPH_MODE_NOP)
2262 return (EINVAL);
2263 }
2264
2265 return (CRYPTODEV_PROBE_HARDWARE);
2266 }
2267
2268 /*
2269 * Select an available port with the lowest number of active sessions.
2270 */
2271 static struct ccr_port *
2272 ccr_choose_port(struct ccr_softc *sc)
2273 {
2274 struct ccr_port *best, *p;
2275 int i;
2276
2277 mtx_assert(&sc->lock, MA_OWNED);
2278 best = NULL;
2279 for (i = 0; i < nitems(sc->ports); i++) {
2280 p = &sc->ports[i];
2281
2282 /* Ignore non-existent ports. */
2283 if (p->rxq == NULL)
2284 continue;
2285
2286 /*
2287 * XXX: Ignore ports whose queues aren't initialized.
2288 * This is racy as the rxq can be destroyed by the
2289 * associated VI detaching. Eventually ccr should use
2290 * dedicated queues.
2291 */
2292 if (p->rxq->iq.adapter == NULL || p->txq->adapter == NULL)
2293 continue;
2294
2295 if ((sc->port_mask & (1u << i)) == 0)
2296 continue;
2297
2298 if (best == NULL ||
2299 p->active_sessions < best->active_sessions)
2300 best = p;
2301 }
2302 return (best);
2303 }
2304
2305 static void
2306 ccr_delete_session(struct ccr_session *s)
2307 {
2308 crypto_freesession(s->sw_session);
2309 sglist_free(s->sg_input);
2310 sglist_free(s->sg_output);
2311 sglist_free(s->sg_ulptx);
2312 sglist_free(s->sg_dsgl);
2313 mtx_destroy(&s->lock);
2314 }
2315
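/*
 * Create a new session: allocate the scatter/gather lists used to
 * build work requests, bind the session to the least-loaded port,
 * and precompute key material (HMAC pads, GHASH key, AES key
 * context) so that the request path only has to assemble the work
 * request.
 */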
2316 static int
2317 ccr_newsession(device_t dev, crypto_session_t cses,
2318 const struct crypto_session_params *csp)
2319 {
2320 struct ccr_softc *sc;
2321 struct ccr_session *s;
2322 const struct auth_hash *auth_hash;
2323 unsigned int auth_mode, cipher_mode, mk_size;
2324 unsigned int partial_digest_len;
2325 int error;
2326
2327 switch (csp->csp_auth_alg) {
2328 case CRYPTO_SHA1:
2329 case CRYPTO_SHA1_HMAC:
2330 auth_hash = &auth_hash_hmac_sha1;
2331 auth_mode = SCMD_AUTH_MODE_SHA1;
2332 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
2333 partial_digest_len = SHA1_HASH_LEN;
2334 break;
2335 case CRYPTO_SHA2_224:
2336 case CRYPTO_SHA2_224_HMAC:
2337 auth_hash = &auth_hash_hmac_sha2_224;
2338 auth_mode = SCMD_AUTH_MODE_SHA224;
2339 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2340 partial_digest_len = SHA2_256_HASH_LEN;
2341 break;
2342 case CRYPTO_SHA2_256:
2343 case CRYPTO_SHA2_256_HMAC:
2344 auth_hash = &auth_hash_hmac_sha2_256;
2345 auth_mode = SCMD_AUTH_MODE_SHA256;
2346 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
2347 partial_digest_len = SHA2_256_HASH_LEN;
2348 break;
2349 case CRYPTO_SHA2_384:
2350 case CRYPTO_SHA2_384_HMAC:
2351 auth_hash = &auth_hash_hmac_sha2_384;
2352 auth_mode = SCMD_AUTH_MODE_SHA512_384;
2353 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2354 partial_digest_len = SHA2_512_HASH_LEN;
2355 break;
2356 case CRYPTO_SHA2_512:
2357 case CRYPTO_SHA2_512_HMAC:
2358 auth_hash = &auth_hash_hmac_sha2_512;
2359 auth_mode = SCMD_AUTH_MODE_SHA512_512;
2360 mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
2361 partial_digest_len = SHA2_512_HASH_LEN;
2362 break;
2363 default:
2364 auth_hash = NULL;
2365 auth_mode = SCMD_AUTH_MODE_NOP;
2366 mk_size = 0;
2367 partial_digest_len = 0;
2368 break;
2369 }
2370
2371 cipher_mode = ccr_cipher_mode(csp);
2372
2373 #ifdef INVARIANTS
2374 switch (csp->csp_mode) {
2375 case CSP_MODE_CIPHER:
2376 if (cipher_mode == SCMD_CIPH_MODE_NOP ||
2377 cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
2378 cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2379 panic("invalid cipher algo");
2380 break;
2381 case CSP_MODE_DIGEST:
2382 if (auth_mode == SCMD_AUTH_MODE_NOP)
2383 panic("invalid auth algo");
2384 break;
2385 case CSP_MODE_AEAD:
2386 if (cipher_mode != SCMD_CIPH_MODE_AES_GCM &&
2387 cipher_mode != SCMD_CIPH_MODE_AES_CCM)
2388 panic("invalid aead cipher algo");
2389 if (auth_mode != SCMD_AUTH_MODE_NOP)
2390 			panic("invalid aead auth algo");
2391 break;
2392 case CSP_MODE_ETA:
2393 if (cipher_mode == SCMD_CIPH_MODE_NOP ||
2394 cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
2395 cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2396 panic("invalid cipher algo");
2397 if (auth_mode == SCMD_AUTH_MODE_NOP)
2398 panic("invalid auth algo");
2399 break;
2400 default:
2401 panic("invalid csp mode");
2402 }
2403 #endif
2404
2405 s = crypto_get_driver_session(cses);
2406 mtx_init(&s->lock, "ccr session", NULL, MTX_DEF);
2407 s->sg_input = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2408 s->sg_output = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2409 s->sg_ulptx = sglist_alloc(TX_SGL_SEGS, M_NOWAIT);
2410 s->sg_dsgl = sglist_alloc(MAX_RX_PHYS_DSGL_SGE, M_NOWAIT);
2411 if (s->sg_input == NULL || s->sg_output == NULL ||
2412 s->sg_ulptx == NULL || s->sg_dsgl == NULL) {
2413 ccr_delete_session(s);
2414 return (ENOMEM);
2415 }
2416
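	/*
	 * AEAD sessions also carry a software fallback session used
	 * by ccr_soft() for requests the engine cannot handle (e.g.
	 * empty payloads or requests whose SGLs are too large).
	 */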
2417 if (csp->csp_mode == CSP_MODE_AEAD) {
2418 error = crypto_newsession(&s->sw_session, csp,
2419 CRYPTOCAP_F_SOFTWARE);
2420 if (error) {
2421 ccr_delete_session(s);
2422 return (error);
2423 }
2424 }
2425
2426 sc = device_get_softc(dev);
2427 s->sc = sc;
2428
2429 mtx_lock(&sc->lock);
2430 if (sc->detaching) {
2431 mtx_unlock(&sc->lock);
2432 ccr_delete_session(s);
2433 return (ENXIO);
2434 }
2435
2436 s->port = ccr_choose_port(sc);
2437 if (s->port == NULL) {
2438 mtx_unlock(&sc->lock);
2439 ccr_delete_session(s);
2440 return (ENXIO);
2441 }
2442
2443 switch (csp->csp_mode) {
2444 case CSP_MODE_AEAD:
2445 if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
2446 s->mode = CCM;
2447 else
2448 s->mode = GCM;
2449 break;
2450 case CSP_MODE_ETA:
2451 s->mode = ETA;
2452 break;
2453 case CSP_MODE_DIGEST:
2454 if (csp->csp_auth_klen != 0)
2455 s->mode = HMAC;
2456 else
2457 s->mode = HASH;
2458 break;
2459 case CSP_MODE_CIPHER:
2460 s->mode = CIPHER;
2461 break;
2462 }
2463
2464 if (s->mode == GCM) {
2465 if (csp->csp_auth_mlen == 0)
2466 s->gmac.hash_len = AES_GMAC_HASH_LEN;
2467 else
2468 s->gmac.hash_len = csp->csp_auth_mlen;
2469 t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen,
2470 s->gmac.ghash_h);
2471 } else if (s->mode == CCM) {
2472 if (csp->csp_auth_mlen == 0)
2473 s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
2474 else
2475 s->ccm_mac.hash_len = csp->csp_auth_mlen;
2476 } else if (auth_mode != SCMD_AUTH_MODE_NOP) {
2477 s->hmac.auth_hash = auth_hash;
2478 s->hmac.auth_mode = auth_mode;
2479 s->hmac.mk_size = mk_size;
2480 s->hmac.partial_digest_len = partial_digest_len;
2481 if (csp->csp_auth_mlen == 0)
2482 s->hmac.hash_len = auth_hash->hashsize;
2483 else
2484 s->hmac.hash_len = csp->csp_auth_mlen;
2485 if (csp->csp_auth_key != NULL)
2486 t4_init_hmac_digest(auth_hash, partial_digest_len,
2487 csp->csp_auth_key, csp->csp_auth_klen,
2488 s->hmac.pads);
2489 else
2490 ccr_init_hash_digest(s);
2491 }
2492 if (cipher_mode != SCMD_CIPH_MODE_NOP) {
2493 s->cipher.cipher_mode = cipher_mode;
2494 s->cipher.iv_len = csp->csp_ivlen;
2495 if (csp->csp_cipher_key != NULL)
2496 ccr_aes_setkey(s, csp->csp_cipher_key,
2497 csp->csp_cipher_klen);
2498 }
2499
2500 s->port->active_sessions++;
2501 mtx_unlock(&sc->lock);
2502 return (0);
2503 }
2504
2505 static void
2506 ccr_freesession(device_t dev, crypto_session_t cses)
2507 {
2508 struct ccr_softc *sc;
2509 struct ccr_session *s;
2510
2511 sc = device_get_softc(dev);
2512 s = crypto_get_driver_session(cses);
2513 #ifdef INVARIANTS
2514 if (s->pending != 0)
2515 device_printf(dev,
2516 "session %p freed with %d pending requests\n", s,
2517 s->pending);
2518 #endif
2519 mtx_lock(&sc->lock);
2520 s->port->active_sessions--;
2521 mtx_unlock(&sc->lock);
2522 ccr_delete_session(s);
2523 }
2524
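/*
 * Submit a request to the engine.  Per-request keys, if supplied,
 * are folded into the session state first; GCM and CCM requests
 * that do not fit in a work request fall back to software via
 * ccr_soft().
 */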
2525 static int
2526 ccr_process(device_t dev, struct cryptop *crp, int hint)
2527 {
2528 const struct crypto_session_params *csp;
2529 struct ccr_softc *sc;
2530 struct ccr_session *s;
2531 int error;
2532
2533 csp = crypto_get_params(crp->crp_session);
2534 s = crypto_get_driver_session(crp->crp_session);
2535 sc = device_get_softc(dev);
2536
2537 mtx_lock(&s->lock);
2538 error = ccr_populate_sglist(s->sg_input, &crp->crp_buf);
2539 if (error == 0 && CRYPTO_HAS_OUTPUT_BUFFER(crp))
2540 error = ccr_populate_sglist(s->sg_output, &crp->crp_obuf);
2541 if (error) {
2542 counter_u64_add(sc->stats_sglist_error, 1);
2543 goto out;
2544 }
2545
2546 switch (s->mode) {
2547 case HASH:
2548 error = ccr_hash(sc, s, crp);
2549 if (error == 0)
2550 counter_u64_add(sc->stats_hash, 1);
2551 break;
2552 case HMAC:
2553 if (crp->crp_auth_key != NULL)
2554 t4_init_hmac_digest(s->hmac.auth_hash,
2555 s->hmac.partial_digest_len, crp->crp_auth_key,
2556 csp->csp_auth_klen, s->hmac.pads);
2557 error = ccr_hash(sc, s, crp);
2558 if (error == 0)
2559 counter_u64_add(sc->stats_hmac, 1);
2560 break;
2561 case CIPHER:
2562 if (crp->crp_cipher_key != NULL)
2563 ccr_aes_setkey(s, crp->crp_cipher_key,
2564 csp->csp_cipher_klen);
2565 error = ccr_cipher(sc, s, crp);
2566 if (error == 0) {
2567 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2568 counter_u64_add(sc->stats_cipher_encrypt, 1);
2569 else
2570 counter_u64_add(sc->stats_cipher_decrypt, 1);
2571 }
2572 break;
2573 case ETA:
2574 if (crp->crp_auth_key != NULL)
2575 t4_init_hmac_digest(s->hmac.auth_hash,
2576 s->hmac.partial_digest_len, crp->crp_auth_key,
2577 csp->csp_auth_klen, s->hmac.pads);
2578 if (crp->crp_cipher_key != NULL)
2579 ccr_aes_setkey(s, crp->crp_cipher_key,
2580 csp->csp_cipher_klen);
2581 error = ccr_eta(sc, s, crp);
2582 if (error == 0) {
2583 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2584 counter_u64_add(sc->stats_eta_encrypt, 1);
2585 else
2586 counter_u64_add(sc->stats_eta_decrypt, 1);
2587 }
2588 break;
2589 case GCM:
2590 if (crp->crp_cipher_key != NULL) {
2591 t4_init_gmac_hash(crp->crp_cipher_key,
2592 csp->csp_cipher_klen, s->gmac.ghash_h);
2593 ccr_aes_setkey(s, crp->crp_cipher_key,
2594 csp->csp_cipher_klen);
2595 }
2596 error = ccr_gcm(sc, s, crp);
2597 if (error == EMSGSIZE || error == EFBIG) {
2598 counter_u64_add(sc->stats_sw_fallback, 1);
2599 mtx_unlock(&s->lock);
2600 ccr_soft(s, crp);
2601 return (0);
2602 }
2603 if (error == 0) {
2604 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2605 counter_u64_add(sc->stats_gcm_encrypt, 1);
2606 else
2607 counter_u64_add(sc->stats_gcm_decrypt, 1);
2608 }
2609 break;
2610 case CCM:
2611 if (crp->crp_cipher_key != NULL) {
2612 ccr_aes_setkey(s, crp->crp_cipher_key,
2613 csp->csp_cipher_klen);
2614 }
2615 error = ccr_ccm(sc, s, crp);
2616 if (error == EMSGSIZE || error == EFBIG) {
2617 counter_u64_add(sc->stats_sw_fallback, 1);
2618 mtx_unlock(&s->lock);
2619 ccr_soft(s, crp);
2620 return (0);
2621 }
2622 if (error == 0) {
2623 if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
2624 counter_u64_add(sc->stats_ccm_encrypt, 1);
2625 else
2626 counter_u64_add(sc->stats_ccm_decrypt, 1);
2627 }
2628 break;
2629 }
2630
2631 if (error == 0) {
2632 #ifdef INVARIANTS
2633 s->pending++;
2634 #endif
2635 counter_u64_add(sc->stats_inflight, 1);
2636 counter_u64_add(s->port->stats_queued, 1);
2637 } else
2638 counter_u64_add(sc->stats_process_error, 1);
2639
2640 out:
2641 mtx_unlock(&s->lock);
2642
2643 if (error) {
2644 crp->crp_etype = error;
2645 crypto_done(crp);
2646 }
2647
2648 return (0);
2649 }
2650
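/*
 * Completion handler for CPL_FW6_PLD messages.  data[1] carries the
 * cookie (the original cryptop pointer) from the work request and
 * data[0] carries the status; MAC and padding failures are reported
 * as EBADMSG.
 */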
2651 static int
2652 do_cpl6_fw_pld(struct sge_iq *iq, const struct rss_header *rss,
2653 struct mbuf *m)
2654 {
2655 struct ccr_softc *sc;
2656 struct ccr_session *s;
2657 const struct cpl_fw6_pld *cpl;
2658 struct cryptop *crp;
2659 uint32_t status;
2660 int error;
2661
2662 if (m != NULL)
2663 cpl = mtod(m, const void *);
2664 else
2665 cpl = (const void *)(rss + 1);
2666
2667 crp = (struct cryptop *)(uintptr_t)be64toh(cpl->data[1]);
2668 s = crypto_get_driver_session(crp->crp_session);
2669 status = be64toh(cpl->data[0]);
2670 if (CHK_MAC_ERR_BIT(status) || CHK_PAD_ERR_BIT(status))
2671 error = EBADMSG;
2672 else
2673 error = 0;
2674
2675 sc = s->sc;
2676 #ifdef INVARIANTS
2677 mtx_lock(&s->lock);
2678 s->pending--;
2679 mtx_unlock(&s->lock);
2680 #endif
2681 counter_u64_add(sc->stats_inflight, -1);
2682 counter_u64_add(s->port->stats_completed, 1);
2683
2684 switch (s->mode) {
2685 case HASH:
2686 case HMAC:
2687 error = ccr_hash_done(sc, s, crp, cpl, error);
2688 break;
2689 case CIPHER:
2690 error = ccr_cipher_done(sc, s, crp, cpl, error);
2691 break;
2692 case ETA:
2693 error = ccr_eta_done(sc, s, crp, cpl, error);
2694 break;
2695 case GCM:
2696 error = ccr_gcm_done(sc, s, crp, cpl, error);
2697 break;
2698 case CCM:
2699 error = ccr_ccm_done(sc, s, crp, cpl, error);
2700 break;
2701 }
2702
2703 if (error == EBADMSG) {
2704 if (CHK_MAC_ERR_BIT(status))
2705 counter_u64_add(sc->stats_mac_error, 1);
2706 if (CHK_PAD_ERR_BIT(status))
2707 counter_u64_add(sc->stats_pad_error, 1);
2708 }
2709 crp->crp_etype = error;
2710 crypto_done(crp);
2711 m_freem(m);
2712 return (0);
2713 }
2714
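/*
 * Register the CPL_FW6_PLD handler when the module loads and remove
 * it on unload.
 */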
2715 static int
2716 ccr_modevent(module_t mod, int cmd, void *arg)
2717 {
2718
2719 switch (cmd) {
2720 case MOD_LOAD:
2721 t4_register_cpl_handler(CPL_FW6_PLD, do_cpl6_fw_pld);
2722 return (0);
2723 case MOD_UNLOAD:
2724 t4_register_cpl_handler(CPL_FW6_PLD, NULL);
2725 return (0);
2726 default:
2727 return (EOPNOTSUPP);
2728 }
2729 }
2730
2731 static device_method_t ccr_methods[] = {
2732 DEVMETHOD(device_identify, ccr_identify),
2733 DEVMETHOD(device_probe, ccr_probe),
2734 DEVMETHOD(device_attach, ccr_attach),
2735 DEVMETHOD(device_detach, ccr_detach),
2736
2737 DEVMETHOD(cryptodev_probesession, ccr_probesession),
2738 DEVMETHOD(cryptodev_newsession, ccr_newsession),
2739 DEVMETHOD(cryptodev_freesession, ccr_freesession),
2740 DEVMETHOD(cryptodev_process, ccr_process),
2741
2742 DEVMETHOD_END
2743 };
2744
2745 static driver_t ccr_driver = {
2746 "ccr",
2747 ccr_methods,
2748 sizeof(struct ccr_softc)
2749 };
2750
2751 DRIVER_MODULE(ccr, t6nex, ccr_driver, ccr_modevent, NULL);
2752 MODULE_VERSION(ccr, 1);
2753 MODULE_DEPEND(ccr, crypto, 1, 1, 1);
2754 MODULE_DEPEND(ccr, t6nex, 1, 1, 1);