/*-
 * Copyright (c) 2021-2022 NVIDIA corporation & affiliates.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_kern_tls.h"
#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <dev/mlx5/mlx5_en/en.h>

#include <dev/mlx5/tls.h>

#include <dev/mlx5/fs.h>
#include <dev/mlx5/mlx5_core/fs_tcp.h>

#include <sys/ktls.h>
#include <opencrypto/cryptodev.h>

#ifdef KERN_TLS

static if_snd_tag_free_t mlx5e_tls_rx_snd_tag_free;
static if_snd_tag_modify_t mlx5e_tls_rx_snd_tag_modify;

static const struct if_snd_tag_sw mlx5e_tls_rx_snd_tag_sw = {
	.snd_tag_modify = mlx5e_tls_rx_snd_tag_modify,
	.snd_tag_free = mlx5e_tls_rx_snd_tag_free,
	.type = IF_SND_TAG_TYPE_TLS_RX
};

MALLOC_DEFINE(M_MLX5E_TLS_RX, "MLX5E_TLS_RX", "MLX5 ethernet HW TLS RX");

/* software TLS RX context */
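/*
 * NOTE: Following the mlx5_ifc convention, the array dimensions below
 * are given in bits; "key_data" thus holds up to 8 * 32 bits = 32
 * bytes, enough for an AES-256 key, and "key_len" is a 32-bit field
 * carrying the key length in bytes.
 */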
struct mlx5_ifc_sw_tls_rx_cntx_bits {
	struct mlx5_ifc_tls_static_params_bits param;
	struct mlx5_ifc_tls_progress_params_bits progress;
	struct {
		uint8_t key_data[8][0x20];
		uint8_t key_len[0x20];
	} key;
};

CTASSERT(MLX5_ST_SZ_BYTES(sw_tls_rx_cntx) <= sizeof(((struct mlx5e_tls_rx_tag *)NULL)->crypto_params));
CTASSERT(MLX5_ST_SZ_BYTES(mkc) == sizeof(((struct mlx5e_tx_umr_wqe *)NULL)->mkc));

static const char *mlx5e_tls_rx_stats_desc[] = {
	MLX5E_TLS_RX_STATS(MLX5E_STATS_DESC)
};

static void mlx5e_tls_rx_work(struct work_struct *);
static bool mlx5e_tls_rx_snd_tag_find_tcp_sn_and_tls_rcd(struct mlx5e_tls_rx_tag *,
    uint32_t, uint32_t *, uint64_t *);

CTASSERT((MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param) % 16) == 0);

static uint32_t
mlx5e_tls_rx_get_ch(struct mlx5e_priv *priv, uint32_t flowid, uint32_t flowtype)
{
	u32 ch;
#ifdef RSS
	u32 temp;
#endif

	/* keep this code synced with mlx5e_select_queue() */
	ch = priv->params.num_channels;
#ifdef RSS
	if (rss_hash2bucket(flowid, flowtype, &temp) == 0)
		ch = temp % ch;
	else
#endif
		ch = (flowid % 128) % ch;
	return (ch);
}

/*
 * This function gets a pointer to an internal queue, IQ, based on the
 * provided "flowid" and "flowtype". The IQ returned may in some rare
 * cases not be activated or running, but this is all handled by the
 * "mlx5e_iq_get_producer_index()" function.
 *
 * The idea behind this function is to spread the IQ traffic as much
 * as possible and to avoid congestion on the same IQ when processing
 * RX traffic.
 */
static struct mlx5e_iq *
mlx5e_tls_rx_get_iq(struct mlx5e_priv *priv, uint32_t flowid, uint32_t flowtype)
{
	/*
	 * NOTE: The channels array is only freed at detach
	 * and it is safe to return a pointer to the send tag
	 * inside the channels structure as long as we
	 * reference the priv.
	 */
	return (&priv->channel[mlx5e_tls_rx_get_ch(priv, flowid, flowtype)].iq);
}

static void
mlx5e_tls_rx_send_static_parameters_cb(void *arg)
{
	struct mlx5e_tls_rx_tag *ptag;

	ptag = (struct mlx5e_tls_rx_tag *)arg;

	m_snd_tag_rele(&ptag->tag);
}

/*
 * This function sends the so-called TLS RX static parameters to the
 * hardware. These parameters are temporarily stored in the
 * "crypto_params" field of the TLS RX tag. Most importantly this
 * function sets the TCP sequence number (32-bit) and TLS record
 * number (64-bit) where the decryption can resume.
 *
 * Zero is returned upon success, else some error happened.
 */
static int
mlx5e_tls_rx_send_static_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_rx_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_umr_wqe) +
	    MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param), MLX5_SEND_WQE_DS);
	struct mlx5e_tx_umr_wqe *wqe;
	int pi;

	mtx_lock(&iq->lock);
	pi = mlx5e_iq_get_producer_index(iq);
	if (pi < 0) {
		mtx_unlock(&iq->lock);
		return (-ENOMEM);
	}
	wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

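	/*
	 * Control segment layout: bits [31:24] carry the opcode
	 * modifier, bits [23:8] the WQE index taken from the producer
	 * counter, and bits [7:0] the opcode itself.
	 */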
	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
	    MLX5_OPCODE_UMR | (MLX5_OPCODE_MOD_UMR_TLS_TIR_STATIC_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
	wqe->ctrl.imm = cpu_to_be32(ptag->tirn << 8);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE | MLX5_FENCE_MODE_INITIATOR_SMALL;

	/* fill out UMR control segment */
	wqe->umr.flags = 0x80;	/* inline data */
	wqe->umr.bsf_octowords =
	    cpu_to_be16(MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param) / 16);

	/* copy in the static crypto parameters */
	memcpy(wqe + 1, MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, param),
	    MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param));

	/* copy data for doorbell */
	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	iq->data[pi].callback = &mlx5e_tls_rx_send_static_parameters_cb;
	iq->data[pi].arg = ptag;

	m_snd_tag_ref(&ptag->tag);

	iq->pc += iq->data[pi].num_wqebbs;

	mlx5e_iq_notify_hw(iq);

	mtx_unlock(&iq->lock);

	return (0);	/* success */
}

static void
mlx5e_tls_rx_send_progress_parameters_cb(void *arg)
{
	struct mlx5e_tls_rx_tag *ptag;

	ptag = (struct mlx5e_tls_rx_tag *)arg;

	complete(&ptag->progress_complete);
}

CTASSERT(MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, progress) ==
    sizeof(((struct mlx5e_tx_psv_wqe *)NULL)->psv));

/*
 * This function resets the state of the TIR context to start
 * searching for a valid TLS header and is used only when allocating
 * the TLS RX tag.
 *
 * Zero is returned upon success, else some error happened.
 */
static int
mlx5e_tls_rx_send_progress_parameters_sync(struct mlx5e_iq *iq,
    struct mlx5e_tls_rx_tag *ptag)
{
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(struct mlx5e_tx_psv_wqe),
	    MLX5_SEND_WQE_DS);
	struct mlx5e_priv *priv;
	struct mlx5e_tx_psv_wqe *wqe;
	int pi;

	mtx_lock(&iq->lock);
	pi = mlx5e_iq_get_producer_index(iq);
	if (pi < 0) {
		mtx_unlock(&iq->lock);
		return (-ENOMEM);
	}
	wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
	    MLX5_OPCODE_SET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIR_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;

	/* copy in the PSV control segment */
	memcpy(&wqe->psv, MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, progress),
	    sizeof(wqe->psv));

	/* copy data for doorbell */
	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	iq->data[pi].callback = &mlx5e_tls_rx_send_progress_parameters_cb;
	iq->data[pi].arg = ptag;

	iq->pc += iq->data[pi].num_wqebbs;

	init_completion(&ptag->progress_complete);

	mlx5e_iq_notify_hw(iq);

	mtx_unlock(&iq->lock);

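	/*
	 * Wait for the firmware to complete the request, but bail out
	 * if the device has entered an error state or has fallen off
	 * the PCI bus, in which case no completion will ever arrive.
	 */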
	while (1) {
		if (wait_for_completion_timeout(&ptag->progress_complete, hz) != 0)
			break;
		priv = container_of(iq, struct mlx5e_channel, iq)->priv;
		if (priv->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
		    pci_channel_offline(priv->mdev->pdev) != 0)
			return (-EWOULDBLOCK);
	}

	return (0);	/* success */
}

CTASSERT(MLX5E_TLS_RX_PROGRESS_BUFFER_SIZE >= MLX5_ST_SZ_BYTES(tls_progress_params));
CTASSERT(MLX5E_TLS_RX_PROGRESS_BUFFER_SIZE <= PAGE_SIZE);

struct mlx5e_get_tls_progress_params_wqe {
	struct mlx5_wqe_ctrl_seg ctrl;
	struct mlx5_seg_get_psv psv;
};

static void
mlx5e_tls_rx_receive_progress_parameters_cb(void *arg)
{
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5e_iq *iq;
	uint32_t tcp_curr_sn_he;
	uint32_t tcp_next_sn_he;
	uint64_t tls_rcd_num;
	void *buffer;

	ptag = (struct mlx5e_tls_rx_tag *)arg;
	buffer = mlx5e_tls_rx_get_progress_buffer(ptag);

	MLX5E_TLS_RX_TAG_LOCK(ptag);

	ptag->tcp_resync_pending = 0;

	switch (MLX5_GET(tls_progress_params, buffer, record_tracker_state)) {
	case MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_TRACKING:
		break;
	default:
		goto done;
	}

	switch (MLX5_GET(tls_progress_params, buffer, auth_state)) {
	case MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD:
		break;
	default:
		goto done;
	}

	tcp_curr_sn_he = MLX5_GET(tls_progress_params, buffer, hw_resync_tcp_sn);

	if (mlx5e_tls_rx_snd_tag_find_tcp_sn_and_tls_rcd(ptag, tcp_curr_sn_he,
	    &tcp_next_sn_he, &tls_rcd_num)) {
		MLX5_SET64(sw_tls_rx_cntx, ptag->crypto_params,
		    param.initial_record_number, tls_rcd_num);
		MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params,
		    param.resync_tcp_sn, tcp_curr_sn_he);

		iq = mlx5e_tls_rx_get_iq(
		    container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx),
		    ptag->flowid, ptag->flowtype);

		if (mlx5e_tls_rx_send_static_parameters(iq, ptag) != 0)
			MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
	}
done:
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

	m_snd_tag_rele(&ptag->tag);
}

/*
 * This function queries the hardware for the current state of the TIR
 * in question. It is typically called when encrypted data is received
 * to re-establish hardware decryption of received TLS data.
 *
 * Zero is returned upon success, else some error happened.
 */
static int
mlx5e_tls_rx_receive_progress_parameters(struct mlx5e_iq *iq, struct mlx5e_tls_rx_tag *ptag)
{
	struct mlx5e_get_tls_progress_params_wqe *wqe;
	const u32 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u64 dma_address;
	int pi;

	mtx_lock(&iq->lock);
	pi = mlx5e_iq_get_producer_index(iq);
	if (pi < 0) {
		mtx_unlock(&iq->lock);
		return (-ENOMEM);
	}

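	/*
	 * Map the progress buffer for DMA; the device writes the
	 * current TIR state into it and the completion callback
	 * reads it back.
	 */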
	mlx5e_iq_load_memory_single(iq, pi,
	    mlx5e_tls_rx_get_progress_buffer(ptag),
	    MLX5E_TLS_RX_PROGRESS_BUFFER_SIZE,
	    &dma_address, BUS_DMASYNC_PREREAD);

	wqe = mlx5_wq_cyc_get_wqe(&iq->wq, pi);

	memset(wqe, 0, sizeof(*wqe));

	wqe->ctrl.opmod_idx_opcode = cpu_to_be32((iq->pc << 8) |
	    MLX5_OPCODE_GET_PSV | (MLX5_OPCODE_MOD_PSV_TLS_TIR_PROGRESS_PARAMS << 24));
	wqe->ctrl.qpn_ds = cpu_to_be32((iq->sqn << 8) | ds_cnt);
	wqe->ctrl.fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
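	/* one PSV; the count appears to be encoded in the upper nibble */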
	wqe->psv.num_psv = 1 << 4;
	wqe->psv.l_key = iq->mkey_be;
	wqe->psv.psv_index[0] = cpu_to_be32(ptag->tirn);
	wqe->psv.va = cpu_to_be64(dma_address);

	/* copy data for doorbell */
	memcpy(iq->doorbell.d32, &wqe->ctrl, sizeof(iq->doorbell.d32));

	iq->data[pi].num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
	iq->data[pi].callback = &mlx5e_tls_rx_receive_progress_parameters_cb;
	iq->data[pi].arg = ptag;

	m_snd_tag_ref(&ptag->tag);

	iq->pc += iq->data[pi].num_wqebbs;

	mlx5e_iq_notify_hw(iq);

	mtx_unlock(&iq->lock);

	return (0);	/* success */
}

/*
 * This is the import function for TLS RX tags.
 */
static int
mlx5e_tls_rx_tag_import(void *arg, void **store, int cnt, int domain, int flags)
{
	struct mlx5e_tls_rx_tag *ptag;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = malloc_domainset(sizeof(*ptag), M_MLX5E_TLS_RX,
		    mlx5_dev_domainset(arg), flags | M_ZERO);
		mtx_init(&ptag->mtx, "mlx5-tls-rx-tag-mtx", NULL, MTX_DEF);
		INIT_WORK(&ptag->work, mlx5e_tls_rx_work);
		store[i] = ptag;
	}
	return (i);
}

/*
 * This is the release function for TLS RX tags.
 */
static void
mlx5e_tls_rx_tag_release(void *arg, void **store, int cnt)
{
	struct mlx5e_tls_rx_tag *ptag;
	int i;

	for (i = 0; i != cnt; i++) {
		ptag = store[i];

		flush_work(&ptag->work);
		mtx_destroy(&ptag->mtx);
		free(ptag, M_MLX5E_TLS_RX);
	}
}

/*
 * This is a convenience function to free TLS RX tags. It resets some
 * selected fields, updates the number of resources and returns the
 * TLS RX tag to the UMA pool of free tags.
 */
static void
mlx5e_tls_rx_tag_zfree(struct mlx5e_tls_rx_tag *ptag)
{
	/* make sure any unhandled taskqueue events are ignored */
	ptag->state = MLX5E_TLS_RX_ST_FREED;

	/* reset some variables */
	ptag->dek_index = 0;
	ptag->dek_index_ok = 0;
	ptag->tirn = 0;
	ptag->flow_rule = NULL;
	ptag->tcp_resync_active = 0;
	ptag->tcp_resync_pending = 0;

	/* avoid leaking keys */
	memset(ptag->crypto_params, 0, sizeof(ptag->crypto_params));

	/* update number of resources in use */
	atomic_add_32(&ptag->tls_rx->num_resources, -1U);

	/* return tag to UMA */
	uma_zfree(ptag->tls_rx->zone, ptag);
}

/*
 * This function enables TLS RX support for the given NIC, if all
 * needed firmware capabilities are present.
 */
int
mlx5e_tls_rx_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tls_rx *ptls = &priv->tls_rx;
	struct sysctl_oid *node;
	uint32_t x;

	if (MLX5_CAP_GEN(priv->mdev, tls_rx) == 0 ||
	    MLX5_CAP_GEN(priv->mdev, log_max_dek) == 0 ||
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version) == 0)
		return (0);

	ptls->wq = create_singlethread_workqueue("mlx5-tls-rx-wq");
	if (ptls->wq == NULL)
		return (ENOMEM);

	sysctl_ctx_init(&ptls->ctx);

	snprintf(ptls->zname, sizeof(ptls->zname),
	    "mlx5_%u_tls_rx", device_get_unit(priv->mdev->pdev->dev.bsddev));

	ptls->zone = uma_zcache_create(ptls->zname,
	    sizeof(struct mlx5e_tls_rx_tag), NULL, NULL, NULL, NULL,
	    mlx5e_tls_rx_tag_import, mlx5e_tls_rx_tag_release, priv->mdev,
	    UMA_ZONE_UNMANAGED);

	/* shared between RX and TX TLS */
	ptls->max_resources = 1U << (MLX5_CAP_GEN(priv->mdev, log_max_dek) - 1);

	for (x = 0; x != MLX5E_TLS_RX_STATS_NUM; x++)
		ptls->stats.arg[x] = counter_u64_alloc(M_WAITOK);

	ptls->init = 1;

	node = SYSCTL_ADD_NODE(&priv->sysctl_ctx,
	    SYSCTL_CHILDREN(priv->sysctl_ifnet), OID_AUTO,
	    "tls_rx", CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, "Hardware TLS receive offload");
	if (node == NULL)
		return (0);

	mlx5e_create_counter_stats(&ptls->ctx,
	    SYSCTL_CHILDREN(node), "stats",
	    mlx5e_tls_rx_stats_desc, MLX5E_TLS_RX_STATS_NUM,
	    ptls->stats.arg);

	return (0);
}

/*
 * This function disables TLS RX support for the given NIC.
 */
void
mlx5e_tls_rx_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tls_rx *ptls = &priv->tls_rx;
	uint32_t x;

	if (ptls->init == 0)
		return;

	ptls->init = 0;
	flush_workqueue(ptls->wq);
	sysctl_ctx_free(&ptls->ctx);
	uma_zdestroy(ptls->zone);
	destroy_workqueue(ptls->wq);

	/* check if all resources are freed */
	MPASS(priv->tls_rx.num_resources == 0);

	for (x = 0; x != MLX5E_TLS_RX_STATS_NUM; x++)
		counter_u64_free(ptls->stats.arg[x]);
}

/*
 * This function is used to serialize sleeping firmware operations
 * needed in order to establish and destroy a TLS RX tag.
 */
static void
mlx5e_tls_rx_work(struct work_struct *work)
{
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5e_priv *priv;
	int err;

	ptag = container_of(work, struct mlx5e_tls_rx_tag, work);
	priv = container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx);

	switch (ptag->state) {
	case MLX5E_TLS_RX_ST_INIT:
		/* try to allocate new TIR context */
		err = mlx5_tls_open_tir(priv->mdev, priv->tdn,
		    priv->channel[mlx5e_tls_rx_get_ch(priv, ptag->flowid, ptag->flowtype)].rqtn,
		    &ptag->tirn);
		if (err) {
			MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
			break;
		}
		MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, progress.pd, ptag->tirn);

		/* try to allocate a DEK context ID */
		err = mlx5_encryption_key_create(priv->mdev, priv->pdn,
		    MLX5_ADDR_OF(sw_tls_rx_cntx, ptag->crypto_params, key.key_data),
		    MLX5_GET(sw_tls_rx_cntx, ptag->crypto_params, key.key_len),
		    &ptag->dek_index);
		if (err) {
			MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
			break;
		}

		MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, param.dek_index, ptag->dek_index);

		ptag->dek_index_ok = 1;

		MLX5E_TLS_RX_TAG_LOCK(ptag);
		if (ptag->state == MLX5E_TLS_RX_ST_INIT)
			ptag->state = MLX5E_TLS_RX_ST_SETUP;
		MLX5E_TLS_RX_TAG_UNLOCK(ptag);
		break;

	case MLX5E_TLS_RX_ST_RELEASE:
		/* remove flow rule for incoming traffic, if any */
		if (ptag->flow_rule != NULL)
			mlx5e_accel_fs_del_inpcb(ptag->flow_rule);

		/* try to destroy DEK context by ID */
		if (ptag->dek_index_ok)
			mlx5_encryption_key_destroy(priv->mdev, ptag->dek_index);

		/* try to destroy TIR context by ID */
		if (ptag->tirn != 0)
			mlx5_tls_close_tir(priv->mdev, ptag->tirn);

		/* free tag */
		mlx5e_tls_rx_tag_zfree(ptag);
		break;

	default:
		break;
	}
}

/*
 * This function translates the crypto parameters into the format used
 * by the firmware and hardware. Currently only AES-128 and AES-256
 * are supported for TLS v1.2 and TLS v1.3.
 *
 * Returns zero on success, else an error happened.
 */
static int
mlx5e_tls_rx_set_params(void *ctx, struct inpcb *inp, const struct tls_session_params *en)
{
	uint32_t tcp_sn_he;
	uint64_t tls_sn_he;

	MLX5_SET(sw_tls_rx_cntx, ctx, param.const_2, 2);
	if (en->tls_vminor == TLS_MINOR_VER_TWO)
		MLX5_SET(sw_tls_rx_cntx, ctx, param.tls_version, 2); /* v1.2 */
	else
		MLX5_SET(sw_tls_rx_cntx, ctx, param.tls_version, 3); /* v1.3 */
	MLX5_SET(sw_tls_rx_cntx, ctx, param.const_1, 1);
	MLX5_SET(sw_tls_rx_cntx, ctx, param.encryption_standard, 1); /* TLS */

	/* copy the initial vector in place */
	switch (en->iv_len) {
	case MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param.gcm_iv):
	case MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param.gcm_iv) +
	     MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, param.implicit_iv):
		memcpy(MLX5_ADDR_OF(sw_tls_rx_cntx, ctx, param.gcm_iv),
		    en->iv, en->iv_len);
		break;
	default:
		return (EINVAL);
	}

	if (en->cipher_key_len <= MLX5_FLD_SZ_BYTES(sw_tls_rx_cntx, key.key_data)) {
		memcpy(MLX5_ADDR_OF(sw_tls_rx_cntx, ctx, key.key_data),
		    en->cipher_key, en->cipher_key_len);
		MLX5_SET(sw_tls_rx_cntx, ctx, key.key_len, en->cipher_key_len);
	} else {
		return (EINVAL);
	}

	if (__predict_false(inp == NULL ||
	    ktls_get_rx_sequence(inp, &tcp_sn_he, &tls_sn_he) != 0))
		return (EINVAL);

	MLX5_SET64(sw_tls_rx_cntx, ctx, param.initial_record_number, tls_sn_he);
	MLX5_SET(sw_tls_rx_cntx, ctx, param.resync_tcp_sn, tcp_sn_he);

	return (0);
}

/* Verify zero default */
CTASSERT(MLX5E_TLS_RX_ST_INIT == 0);

/*
 * This function is responsible for allocating a TLS RX tag. It is a
 * callback function invoked by the network stack.
 *
 * Returns zero on success, else an error happened.
 */
int
mlx5e_tls_rx_snd_tag_alloc(struct ifnet *ifp,
    union if_snd_tag_alloc_params *params,
    struct m_snd_tag **ppmt)
{
	struct mlx5e_iq *iq;
	struct mlx5e_priv *priv;
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5_flow_rule *flow_rule;
	const struct tls_session_params *en;
	uint32_t value;
	int error;

	priv = ifp->if_softc;

	if (unlikely(priv->gone != 0 || priv->tls_rx.init == 0 ||
	    params->hdr.flowtype == M_HASHTYPE_NONE))
		return (EOPNOTSUPP);

	/* allocate new tag from zone, if any */
	ptag = uma_zalloc(priv->tls_rx.zone, M_NOWAIT);
	if (ptag == NULL)
		return (ENOMEM);

	/* sanity check default values */
	MPASS(ptag->dek_index == 0);
	MPASS(ptag->dek_index_ok == 0);

	/* setup TLS RX tag */
	ptag->tls_rx = &priv->tls_rx;
	ptag->flowtype = params->hdr.flowtype;
	ptag->flowid = params->hdr.flowid;

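	/*
	 * Reserve a resource slot; if anything fails below, the slot
	 * is given back by mlx5e_tls_rx_tag_zfree() via the "failure"
	 * label.
	 */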
	value = atomic_fetchadd_32(&priv->tls_rx.num_resources, 1U);

	/* check resource limits */
	if (value >= priv->tls_rx.max_resources) {
		error = ENOMEM;
		goto failure;
	}

	en = &params->tls_rx.tls->params;

	/* only TLS v1.2 and v1.3 are currently supported */
	if (en->tls_vmajor != TLS_MAJOR_VER_ONE ||
	    (en->tls_vminor != TLS_MINOR_VER_TWO
#ifdef TLS_MINOR_VER_THREE
	     && en->tls_vminor != TLS_MINOR_VER_THREE
#endif
	    )) {
		error = EPROTONOSUPPORT;
		goto failure;
	}

	switch (en->cipher_algorithm) {
	case CRYPTO_AES_NIST_GCM_16:
		switch (en->cipher_key_len) {
		case 128 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_128) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_rx_set_params(
			    ptag->crypto_params, params->tls_rx.inp, en);
			if (error)
				goto failure;
			break;

		case 256 / 8:
			if (en->tls_vminor == TLS_MINOR_VER_TWO) {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_2_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			} else {
				if (MLX5_CAP_TLS(priv->mdev, tls_1_3_aes_gcm_256) == 0) {
					error = EPROTONOSUPPORT;
					goto failure;
				}
			}
			error = mlx5e_tls_rx_set_params(
			    ptag->crypto_params, params->tls_rx.inp, en);
			if (error)
				goto failure;
			break;

		default:
			error = EINVAL;
			goto failure;
		}
		break;
	default:
		error = EPROTONOSUPPORT;
		goto failure;
	}

	/* store pointer to mbuf tag */
	MPASS(ptag->tag.refcount == 0);
	m_snd_tag_init(&ptag->tag, ifp, &mlx5e_tls_rx_snd_tag_sw);
	*ppmt = &ptag->tag;

	/* reset state */
	ptag->state = MLX5E_TLS_RX_ST_INIT;

	queue_work(priv->tls_rx.wq, &ptag->work);
	flush_work(&ptag->work);

	/* check that worker task completed successfully */
	MLX5E_TLS_RX_TAG_LOCK(ptag);
	if (ptag->state == MLX5E_TLS_RX_ST_SETUP) {
		ptag->state = MLX5E_TLS_RX_ST_READY;
		error = 0;
	} else {
		error = ENOMEM;
	}
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

	if (unlikely(error))
		goto cleanup;

	iq = mlx5e_tls_rx_get_iq(priv, ptag->flowid, ptag->flowtype);

	/* establish connection between DEK and TIR */
	if (mlx5e_tls_rx_send_static_parameters(iq, ptag) != 0) {
		MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
		error = ENOMEM;
		goto cleanup;
	}

	MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, progress.auth_state,
	    MLX5E_TLS_RX_PROGRESS_PARAMS_AUTH_STATE_NO_OFFLOAD);
	MLX5_SET(sw_tls_rx_cntx, ptag->crypto_params, progress.record_tracker_state,
	    MLX5E_TLS_RX_PROGRESS_PARAMS_RECORD_TRACKER_STATE_START);

	/* reset state to all zeros */
	if (mlx5e_tls_rx_send_progress_parameters_sync(iq, ptag) != 0) {
		MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
		error = ENOMEM;
		goto cleanup;
	}

	if (ifp->if_pcp != IFNET_PCP_NONE || params->tls_rx.vlan_id != 0) {
		/* create flow rule for TLS RX traffic (tagged) */
		flow_rule = mlx5e_accel_fs_add_inpcb(priv, params->tls_rx.inp,
		    ptag->tirn, MLX5_FS_DEFAULT_FLOW_TAG, params->tls_rx.vlan_id);
	} else {
		/* create flow rule for TLS RX traffic (untagged) */
		flow_rule = mlx5e_accel_fs_add_inpcb(priv, params->tls_rx.inp,
		    ptag->tirn, MLX5_FS_DEFAULT_FLOW_TAG, MLX5E_ACCEL_FS_ADD_INPCB_NO_VLAN);
	}

	if (IS_ERR_OR_NULL(flow_rule)) {
		MLX5E_TLS_RX_STAT_INC(ptag, rx_error, 1);
		error = ENOMEM;
		goto cleanup;
	}

	ptag->flow_rule = flow_rule;

	return (0);

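	/*
	 * After m_snd_tag_init() has been called, the tag is reference
	 * counted and must be released through m_snd_tag_rele(), which
	 * eventually invokes mlx5e_tls_rx_snd_tag_free(). Before that
	 * point the tag can be returned directly to the UMA zone.
	 */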
cleanup:
	m_snd_tag_rele(&ptag->tag);
	return (error);

failure:
	mlx5e_tls_rx_tag_zfree(ptag);
	return (error);
}

/*
 * This function adds the TCP sequence number and TLS record number,
 * in host endian format, to a small database. When consecutive TLS
 * records have the same length, they are accumulated by counting
 * instead of getting separate entries in the database. The database
 * is dimensioned such that it cannot store more than 1 GByte of
 * continuous TCP data, to avoid issues with TCP sequence number
 * wraparound. A record length of zero bytes has a special meaning:
 * it indicates that the resync completed and that all data in the
 * database can be discarded. This function is called after the TCP
 * stack has re-assembled all TCP fragments due to out-of-order packet
 * reception, so all TCP sequence numbers passed in should be
 * sequential.
 *
 * This function returns true if a so-called TLS RX resync operation
 * is in progress, else false.
 */
static bool
mlx5e_tls_rx_snd_tag_add_tcp_sequence(struct mlx5e_tls_rx_tag *ptag,
    uint32_t tcp_sn_he, uint32_t len, uint64_t tls_rcd)
{
	uint16_t i, j, n;

	if (ptag->tcp_resync_active == 0 ||
	    ptag->tcp_resync_next != tcp_sn_he ||
	    len == 0) {
		/* start over again or terminate */
		ptag->tcp_resync_active = (len != 0);
		ptag->tcp_resync_len[0] = len;
		ptag->tcp_resync_num[0] = 1;
		ptag->tcp_resync_pc = (len != 0);
		ptag->tcp_resync_cc = 0;
		ptag->tcp_resync_start = tcp_sn_he;
		ptag->rcd_resync_start = tls_rcd;
	} else {
		i = (ptag->tcp_resync_pc - 1) & (MLX5E_TLS_RX_RESYNC_MAX - 1);
		n = ptag->tcp_resync_pc - ptag->tcp_resync_cc;

		/* check if same length as last time */
		if (ptag->tcp_resync_len[i] == len &&
		    ptag->tcp_resync_num[i] != MLX5E_TLS_RX_NUM_MAX) {
			/* use existing entry */
			ptag->tcp_resync_num[i]++;
		} else if (n == MLX5E_TLS_RX_RESYNC_MAX) {
			j = ptag->tcp_resync_cc++ & (MLX5E_TLS_RX_RESYNC_MAX - 1);
			/* adjust starting TCP sequence number */
			ptag->rcd_resync_start += ptag->tcp_resync_num[j];
			ptag->tcp_resync_start += ptag->tcp_resync_len[j] * ptag->tcp_resync_num[j];
			i = ptag->tcp_resync_pc++ & (MLX5E_TLS_RX_RESYNC_MAX - 1);
			/* store new entry */
			ptag->tcp_resync_len[i] = len;
			ptag->tcp_resync_num[i] = 1;
		} else {
			i = ptag->tcp_resync_pc++ & (MLX5E_TLS_RX_RESYNC_MAX - 1);
			/* add new entry */
			ptag->tcp_resync_len[i] = len;
			ptag->tcp_resync_num[i] = 1;
		}
	}

	/* store next TCP SN in host endian format */
	ptag->tcp_resync_next = tcp_sn_he + len;

	return (ptag->tcp_resync_active);
}

/*
 * This function checks if the given TCP sequence number points to the
 * beginning of a valid TLS header.
 *
 * Returns true if a match is found, else false.
 */
static bool
mlx5e_tls_rx_snd_tag_find_tcp_sn_and_tls_rcd(struct mlx5e_tls_rx_tag *ptag,
    uint32_t tcp_sn_he, uint32_t *p_next_tcp_sn_he, uint64_t *p_tls_rcd)
{
	uint16_t i, j;
	uint32_t off = 0;
	uint32_t rcd = 0;
	uint32_t delta;
	uint32_t leap;

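	/*
	 * Walk the ring from consumer to producer, accumulating the
	 * TCP byte offset ("off") and the TLS record count ("rcd")
	 * until the entry containing "tcp_sn_he" is found.
	 */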
	for (i = ptag->tcp_resync_cc; i != ptag->tcp_resync_pc; i++) {
		delta = tcp_sn_he - off - ptag->tcp_resync_start;

		/* check if subtraction went negative */
		if ((int32_t)delta < 0)
			break;

		j = i & (MLX5E_TLS_RX_RESYNC_MAX - 1);
		leap = ptag->tcp_resync_len[j] * ptag->tcp_resync_num[j];
		if (delta < leap) {
			if ((delta % ptag->tcp_resync_len[j]) == 0) {
				*p_next_tcp_sn_he = tcp_sn_he +
				    ptag->tcp_resync_len[j];
				*p_tls_rcd = ptag->rcd_resync_start +
				    (uint64_t)rcd +
				    (uint64_t)(delta / ptag->tcp_resync_len[j]);
				return (true);	/* success */
			}
			break;	/* invalid offset */
		}
		rcd += ptag->tcp_resync_num[j];
		off += leap;
	}
	return (false);	/* not found */
}

/*
 * This is a callback function from the network stack to keep track of
 * TLS RX TCP sequence numbers.
 *
 * Returns zero on success, else an error happened.
 */
static int
mlx5e_tls_rx_snd_tag_modify(struct m_snd_tag *pmt, union if_snd_tag_modify_params *params)
{
	struct mlx5e_tls_rx_tag *ptag;
	struct mlx5e_priv *priv;
	struct mlx5e_iq *iq;
	int err;

	ptag = container_of(pmt, struct mlx5e_tls_rx_tag, tag);
	priv = container_of(ptag->tls_rx, struct mlx5e_priv, tls_rx);

	if (unlikely(priv->gone != 0))
		return (ENXIO);

	iq = mlx5e_tls_rx_get_iq(priv, ptag->flowid, ptag->flowtype);

	MLX5E_TLS_RX_TAG_LOCK(ptag);

	if (mlx5e_tls_rx_snd_tag_add_tcp_sequence(ptag,
	    params->tls_rx.tls_hdr_tcp_sn,
	    params->tls_rx.tls_rec_length,
	    params->tls_rx.tls_seq_number) &&
	    ptag->tcp_resync_pending == 0) {
		err = mlx5e_tls_rx_receive_progress_parameters(iq, ptag);
		if (err != 0) {
			MLX5E_TLS_RX_STAT_INC(ptag, rx_resync_err, 1);
		} else {
			ptag->tcp_resync_pending = 1;
			MLX5E_TLS_RX_STAT_INC(ptag, rx_resync_ok, 1);
		}
	} else {
		err = 0;
	}
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

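	/* "err" is a negative errno value; the stack expects a positive one */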
	return (-err);
}

/*
 * This function frees a TLS RX tag in a non-blocking way.
 */
static void
mlx5e_tls_rx_snd_tag_free(struct m_snd_tag *pmt)
{
	struct mlx5e_tls_rx_tag *ptag =
	    container_of(pmt, struct mlx5e_tls_rx_tag, tag);
	struct mlx5e_priv *priv;

	MLX5E_TLS_RX_TAG_LOCK(ptag);
	ptag->state = MLX5E_TLS_RX_ST_RELEASE;
	MLX5E_TLS_RX_TAG_UNLOCK(ptag);

	priv = ptag->tag.ifp->if_softc;
	queue_work(priv->tls_rx.wq, &ptag->work);
}

#else

int
mlx5e_tls_rx_init(struct mlx5e_priv *priv)
{

	return (0);
}

void
mlx5e_tls_rx_cleanup(struct mlx5e_priv *priv)
{
	/* NOP */
}

#endif