/*-
 * Copyright (c) 2013-2017, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include "opt_rss.h"
#include "opt_ratelimit.h"

#include <linux/etherdevice.h>
#include <dev/mlx5/driver.h>
#include <dev/mlx5/mlx5_ifc.h>
#include <dev/mlx5/vport.h>
#include <dev/mlx5/fs.h>
#include <dev/mlx5/mpfs.h>
#include <dev/mlx5/mlx5_core/mlx5_core.h>
#include <dev/mlx5/mlx5_core/eswitch.h>

#define UPLINK_VPORT 0xFFFF

#define MLX5_DEBUG_ESWITCH_MASK BIT(3)

#define esw_info(dev, format, ...)				\
	printf("mlx5_core: INFO: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_warn(dev, format, ...)				\
	printf("mlx5_core: WARN: ""(%s): E-Switch: " format, (dev)->priv.name, ##__VA_ARGS__)

#define esw_debug(dev, format, ...)				\
	mlx5_core_dbg_mask(dev, MLX5_DEBUG_ESWITCH_MASK, format, ##__VA_ARGS__)

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* E-Switch UC L2 table hash node */
struct esw_uc_addr {
	struct l2addr_node node;
	u32                table_index;
	u32                vport;
};

/* E-Switch MC FDB table hash node */
struct esw_mc_addr { /* SRIOV only */
	struct l2addr_node     node;
	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
	u32                    refcnt;
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u32                    vport;
	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
};

enum {
	UC_ADDR_CHANGE = BIT(0),
	MC_ADDR_CHANGE = BIT(1),
};

/* Vport context events */
#define SRIOV_VPORT_EVENTS (UC_ADDR_CHANGE | \
			    MC_ADDR_CHANGE)

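/*
 * Arm the NIC vport context change event on @vport and select which
 * address-list changes (UC and/or MC) should trigger it.  The event is
 * one-shot and must be re-armed after every notification.
 */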
static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(modify_nic_vport_context_out)] = {0};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);

	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

/* E-Switch vport context HW commands */
static int query_esw_vport_context_cmd(struct mlx5_core_dev *mdev, u32 vport,
				       u32 *out, int outlen)
{
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {0};

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);

	MLX5_SET(query_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(query_esw_vport_context_in, in, other_vport, 1);

	return mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

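/*
 * Read back the VST (cvlan strip/insert) configuration of @vport.
 * The VLAN id and PCP are reported only when strip or insert is
 * currently enabled; otherwise both are returned as zero.
 */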
static int query_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				 u16 *vlan, u8 *qos)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {0};
	int err;
	bool cvlan_strip;
	bool cvlan_insert;

	*vlan = 0;
	*qos = 0;

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	err = query_esw_vport_context_cmd(dev, vport, out, sizeof(out));
	if (err)
		goto out;

	cvlan_strip = MLX5_GET(query_esw_vport_context_out, out,
			       esw_vport_context.vport_cvlan_strip);

	cvlan_insert = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.vport_cvlan_insert);

	if (cvlan_strip || cvlan_insert) {
		*vlan = MLX5_GET(query_esw_vport_context_out, out,
				 esw_vport_context.cvlan_id);
		*qos = MLX5_GET(query_esw_vport_context_out, out,
				esw_vport_context.cvlan_pcp);
	}

	esw_debug(dev, "Query Vport[%d] cvlan: VLAN %d qos=%d\n",
		  vport, *vlan, *qos);
out:
	return err;
}

static int modify_esw_vport_context_cmd(struct mlx5_core_dev *dev, u16 vport,
					void *in, int inlen)
{
	u32 out[MLX5_ST_SZ_DW(modify_esw_vport_context_out)] = {0};

	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	if (vport)
		MLX5_SET(modify_esw_vport_context_in, in, other_vport, 1);

	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);

	return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
}

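/*
 * Program VST on @vport: when @set is true, enable cvlan strip and
 * insert-if-not-exist with the given VLAN id and PCP; when false,
 * clear both.  The field_select bits are set unconditionally so the
 * firmware updates both strip and insert in either case.
 */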
static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u32 vport,
				  u16 vlan, u8 qos, bool set)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {0};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -ENOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%d\n",
		  vport, vlan, qos, set);

	if (set) {
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return modify_esw_vport_context_cmd(dev, vport, in, sizeof(in));
}

/* E-Switch FDB */
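/*
 * Install an FDB rule forwarding packets whose outer destination MAC
 * exactly matches @mac to @vport.  Returns the rule handle, or NULL on
 * allocation or flow-steering failure.
 */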
static struct mlx5_flow_rule *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
{
	int match_header = MLX5_MATCH_OUTER_HEADERS;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_rule *flow_rule = NULL;
	u32 *match_v;
	u32 *match_c;
	u8 *dmac_v;
	u8 *dmac_c;

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		printf("mlx5_core: WARN: ""FDB: Failed to alloc match parameters\n");
		goto out;
	}
	dmac_v = MLX5_ADDR_OF(fte_match_param, match_v,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, match_c,
			      outer_headers.dmac_47_16);

	ether_addr_copy(dmac_v, mac);
	/* Match criteria mask: exact match on all 48 DMAC bits */
	memset(dmac_c, 0xff, ETH_ALEN);

	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
	dest.vport_num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	flow_rule =
		mlx5_add_flow_rule(esw->fdb_table.fdb,
				   match_header,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
				   0, &dest);
	if (IS_ERR_OR_NULL(flow_rule)) {
		printf("mlx5_core: WARN: ""FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n", dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return flow_rule;
}

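/*
 * Create the switch-wide FDB flow table together with a single flow
 * group whose match criteria is the outer destination MAC.  All UC/MC
 * forwarding rules installed by this file live in that group.
 */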
static int esw_create_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -ENOMEM;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* Size the table two entries below the maximum (firmware guidance) */
	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size)) - 2;

	fdb = mlx5_create_flow_table(root_ns, 0, "FDB", table_size);
	if (IS_ERR_OR_NULL(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	eth_broadcast_addr(dmac);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}

	esw->fdb_table.addr_grp = g;
	esw->fdb_table.fdb = fdb;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(fdb))
		mlx5_destroy_flow_table(fdb);
	return err;
}

static void esw_destroy_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy FDB Table\n");
	mlx5_destroy_flow_group(esw->fdb_table.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
	esw->fdb_table.fdb = NULL;
	esw->fdb_table.addr_grp = NULL;
}

/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

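/*
 * Claim a unicast MAC for @vaddr->vport: reserve an L2 (MPFS) table
 * entry and, when SRIOV is active (the FDB exists), install a rule
 * steering the MAC to the vport.  A MAC may only be owned by one
 * vport at a time, so a duplicate claim fails with -EEXIST.
 */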
static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;
	int err;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (esw_uc) {
		esw_warn(esw->dev,
			 "Failed to set L2 mac(%pM) for vport(%d), mac is already in use by vport(%d)\n",
			 mac, vport, esw_uc->vport);
		return -EEXIST;
	}

	esw_uc = l2addr_hash_add(hash, mac, struct esw_uc_addr, GFP_KERNEL);
	if (!esw_uc)
		return -ENOMEM;
	esw_uc->vport = vport;

	err = mlx5_mpfs_add_mac(esw->dev, &esw_uc->table_index, mac, 0, 0);
	if (err)
		goto abort;

	if (esw->fdb_table.fdb) /* SRIOV is enabled: Forward UC MAC to vport */
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);
	return err;
abort:
	l2addr_hash_del(esw_uc);
	return err;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->l2_table.l2_hash;
	struct esw_uc_addr *esw_uc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	esw_uc = l2addr_hash_find(hash, mac, struct esw_uc_addr);
	if (!esw_uc || esw_uc->vport != vport) {
		esw_debug(esw->dev,
			  "MAC(%pM) doesn't belong to vport (%d)\n",
			  mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev, "\tDELETE UC MAC: vport[%d] %pM index:%d fr(%p)\n",
		  vport, mac, esw_uc->table_index, vaddr->flow_rule);

	mlx5_mpfs_del_mac(esw->dev, esw_uc->table_index);

	if (vaddr->flow_rule)
		mlx5_del_flow_rule(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	l2addr_hash_del(esw_uc);
	return 0;
}

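/*
 * Multicast MACs are reference counted in esw->mc_table: the first
 * vport to join a group also installs a rule forwarding the MAC to
 * the uplink, and every member vport gets its own forwarding rule.
 * The uplink rule is torn down when the last member leaves.
 */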
static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, UPLINK_VPORT);
add:
	esw_mc->refcnt++;
	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u32 vport = vaddr->vport;

	if (!esw->fdb_table.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rule(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	if (--esw_mc->refcnt)
		return 0;

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rule(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

/* Apply vport UC/MC list to HW L2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       u32 vport_num, int list_type)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	bool is_uc = list_type == MLX5_NIC_VPORT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport_num, list_type,
					    mac_list, &size);
	if (err)
		goto out;	/* don't leak mac_list on query failure */
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport_num, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport_num);
			continue;
		}
		addr->vport = vport_num;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

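/*
 * Work-queue handler for NIC vport context change events: re-read the
 * vport's UC/MC address lists from firmware, apply the deltas to the
 * L2/FDB tables, and re-arm the change event while the vport is still
 * enabled.
 */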
static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport->vport,
					   MLX5_NIC_VPORT_LIST_TYPE_MC);
		esw_apply_vport_addr_list(esw, vport->vport,
					  MLX5_NIC_VPORT_LIST_TYPE_MC);
	}

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

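/*
 * Create the per-vport egress ACL: a two-entry flow table with one
 * group matching cvlan_tag/first_vid (the allowed-VLAN rule) and one
 * wildcard group (the catch-all drop rule).
 */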
static void esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *vlan_grp = NULL;
	struct mlx5_flow_group *drop_grp = NULL;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	void *match_criteria;
	char table_name[32];
	u32 *flow_group_in;
	int table_size = 2;
	int err = 0;

	if (!MLX5_CAP_ESW_EGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] egress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_EGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_EGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch egress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	snprintf(table_name, 32, "egress_%d", vport->vport);
	acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	vlan_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(vlan_grp)) {
		err = PTR_ERR(vlan_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress allowed vlans flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
	drop_grp = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(drop_grp)) {
		err = PTR_ERR(drop_grp);
		esw_warn(dev, "Failed to create E-Switch vport[%d] egress drop flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->egress.acl = acl;
	vport->egress.drop_grp = drop_grp;
	vport->egress.allowed_vlans_grp = vlan_grp;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(vlan_grp))
		mlx5_destroy_flow_group(vlan_grp);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
					   struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
		mlx5_del_flow_rule(vport->egress.allowed_vlan);

	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
		mlx5_del_flow_rule(vport->egress.drop_rule);

	vport->egress.allowed_vlan = NULL;
	vport->egress.drop_rule = NULL;
}

static void esw_vport_disable_egress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->egress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch egress ACL\n", vport->vport);

	esw_vport_cleanup_egress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->egress.allowed_vlans_grp);
	mlx5_destroy_flow_group(vport->egress.drop_grp);
	mlx5_destroy_flow_table(vport->egress.acl);
	vport->egress.allowed_vlans_grp = NULL;
	vport->egress.drop_grp = NULL;
	vport->egress.acl = NULL;
}

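/*
 * Create the per-vport ingress ACL: a single-entry flow table with one
 * group matching cvlan_tag, used to drop already-tagged traffic from
 * the VF when VST is configured.
 */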
static void esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
					 struct mlx5_vport *vport)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *acl;
	struct mlx5_flow_group *g;
	void *match_criteria;
	char table_name[32];
	u32 *flow_group_in;
	int table_size = 1;
	int err = 0;

	if (!MLX5_CAP_ESW_INGRESS_ACL(dev, ft_support))
		return;

	esw_debug(dev, "Create vport[%d] ingress ACL log_max_size(%d)\n",
		  vport->vport, MLX5_CAP_ESW_INGRESS_ACL(dev, log_max_ft_size));

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS);
	if (!root_ns) {
		esw_warn(dev, "Failed to get E-Switch ingress flow namespace\n");
		return;
	}

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return;

	snprintf(table_name, 32, "ingress_%d", vport->vport);
	acl = mlx5_create_vport_flow_table(root_ns, vport->vport, 0, table_name, table_size);
	if (IS_ERR_OR_NULL(acl)) {
		err = PTR_ERR(acl);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow Table, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);

	g = mlx5_create_flow_group(acl, flow_group_in);
	if (IS_ERR_OR_NULL(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create E-Switch vport[%d] ingress flow group, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	vport->ingress.acl = acl;
	vport->ingress.drop_grp = g;
out:
	kvfree(flow_group_in);
	if (err && !IS_ERR_OR_NULL(acl))
		mlx5_destroy_flow_table(acl);
}

static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
					    struct mlx5_vport *vport)
{
	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
		mlx5_del_flow_rule(vport->ingress.drop_rule);
	vport->ingress.drop_rule = NULL;
}

static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	if (IS_ERR_OR_NULL(vport->ingress.acl))
		return;

	esw_debug(esw->dev, "Destroy vport[%d] E-Switch ingress ACL\n", vport->vport);

	esw_vport_cleanup_ingress_rules(esw, vport);
	mlx5_destroy_flow_group(vport->ingress.drop_grp);
	mlx5_destroy_flow_table(vport->ingress.acl);
	vport->ingress.acl = NULL;
	vport->ingress.drop_grp = NULL;
}

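/*
 * (Re)build the ingress ACL rules for VST: when a VLAN/qos is set on
 * the vport, drop any packet from the VF that already carries a
 * customer VLAN tag.
 */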
static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport)
{
	struct mlx5_flow_destination dest;
	u32 *match_v;
	u32 *match_c;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->ingress.acl)) {
		esw_warn(esw->dev,
			 "vport[%d] configure ingress rules failed, ingress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_ingress_rules(esw, vport);

	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure ingress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure ingress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}
	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);

	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
	dest.vport_num = vport->vport;

	vport->ingress.drop_rule =
		mlx5_add_flow_rule(vport->ingress.acl,
				   MLX5_MATCH_OUTER_HEADERS,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, &dest);
	if (IS_ERR_OR_NULL(vport->ingress.drop_rule)) {
		err = PTR_ERR(vport->ingress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure ingress rules, err(%d)\n", vport->vport, err);
		vport->ingress.drop_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return err;
}

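/*
 * (Re)build the egress ACL rules for VST: allow traffic tagged with
 * the configured VLAN id, then drop everything else via a wildcard
 * (star) rule.
 */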
static int esw_vport_egress_config(struct mlx5_eswitch *esw,
				   struct mlx5_vport *vport)
{
	struct mlx5_flow_destination dest;
	u32 *match_v;
	u32 *match_c;
	int err = 0;

	if (IS_ERR_OR_NULL(vport->egress.acl)) {
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, egress acl is not initialized!\n",
			 vport->vport);
		return -EPERM;
	}

	esw_vport_cleanup_egress_rules(esw, vport);

	if (!vport->vlan && !vport->qos)
		return 0;

	esw_debug(esw->dev,
		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
		  vport->vport, vport->vlan, vport->qos);

	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	if (!match_v || !match_c) {
		err = -ENOMEM;
		esw_warn(esw->dev, "vport[%d] configure egress rules failed, err(%d)\n",
			 vport->vport, err);
		goto out;
	}

	/* Allowed vlan rule */
	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_v, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, match_c, outer_headers.first_vid);
	MLX5_SET(fte_match_param, match_v, outer_headers.first_vid, vport->vlan);

	dest.type = MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT;
	dest.vport_num = vport->vport;

	vport->egress.allowed_vlan =
		mlx5_add_flow_rule(vport->egress.acl,
				   MLX5_MATCH_OUTER_HEADERS,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
				   0, &dest);
	if (IS_ERR_OR_NULL(vport->egress.allowed_vlan)) {
		err = PTR_ERR(vport->egress.allowed_vlan);
		printf("mlx5_core: WARN: ""vport[%d] configure egress allowed vlan rule failed, err(%d)\n", vport->vport, err);
		vport->egress.allowed_vlan = NULL;
		goto out;
	}

	/* Drop others rule (star rule) */
	memset(match_c, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	memset(match_v, 0, MLX5_ST_SZ_BYTES(fte_match_param));
	vport->egress.drop_rule =
		mlx5_add_flow_rule(vport->egress.acl,
				   0,
				   match_c,
				   match_v,
				   MLX5_FLOW_CONTEXT_ACTION_DROP,
				   0, &dest);
	if (IS_ERR_OR_NULL(vport->egress.drop_rule)) {
		err = PTR_ERR(vport->egress.drop_rule);
		printf("mlx5_core: WARN: ""vport[%d] configure egress drop rule failed, err(%d)\n", vport->vport, err);
		vport->egress.drop_rule = NULL;
	}
out:
	kfree(match_v);
	kfree(match_c);
	return err;
}

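/*
 * Bring @vport_num under E-Switch control: set up the VF ACLs, move
 * the vport admin state to AUTO, sync the current vport context and
 * arm the requested change events.
 */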
static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
			     int enable_events)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
		esw_vport_enable_ingress_acl(esw, vport);
		esw_vport_enable_egress_acl(esw, vport);
		esw_vport_ingress_config(esw, vport);
		esw_vport_egress_config(esw, vport);
	}

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_AUTO);

	/* Sync with current vport context */
	vport->enabled_events = enable_events;
	esw_vport_change_handler(&vport->vport_change_handler);

	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = true;
	spin_unlock_irqrestore(&vport->lock, flags);

	arm_vport_context_events_cmd(esw->dev, vport_num, enable_events);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
	mutex_unlock(&vport->state_lock);
}

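/*
 * Flush every UC/MC address the vport has published: mark all hash
 * entries for deletion and apply the lists, removing the L2 table
 * entries and FDB rules.
 */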
static void esw_cleanup_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_node *tmp;
	int hi;

	for_each_l2hash_node(node, tmp, vport->uc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_UC);

	for_each_l2hash_node(node, tmp, vport->mc_list, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}
	esw_apply_vport_addr_list(esw, vport_num, MLX5_NIC_VPORT_LIST_TYPE_MC);
}

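/*
 * Undo esw_enable_vport(): mark the vport disabled so new events are
 * discarded, drain in-flight work, disarm events, flush addresses and
 * tear down the VF ACLs.
 */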
static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
{
	struct mlx5_vport *vport = &esw->vports[vport_num];
	unsigned long flags;

	mutex_lock(&vport->state_lock);
	if (!vport->enabled) {
		mutex_unlock(&vport->state_lock);
		return;
	}

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	spin_lock_irqsave(&vport->lock, flags);
	vport->enabled = false;
	vport->enabled_events = 0;
	spin_unlock_irqrestore(&vport->lock, flags);

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
				      vport_num,
				      MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	/* Wait for current already scheduled events to complete */
	flush_workqueue(esw->work_queue);
	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	/* We don't assume VFs will cleanup after themselves */
	esw_cleanup_vport(esw, vport_num);
	if (vport_num) {
		esw_vport_disable_egress_acl(esw, vport);
		esw_vport_disable_ingress_acl(esw, vport);
	}
	esw->enabled_vports--;
	mutex_unlock(&vport->state_lock);
}

/* Public E-Switch API */
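/*
 * Switch the E-Switch into SRIOV mode: tear down the PF-only vport
 * setup, create the FDB, then enable the PF vport and @nvfs VF vports
 * with full UC/MC event tracking.
 */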
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs)
{
	int err;
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
	    !MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
		return -ENOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n");

	esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d)\n", nvfs);

	esw_disable_vport(esw, 0);

	err = esw_create_fdb_table(esw);
	if (err)
		goto abort;

	for (i = 0; i <= nvfs; i++)
		esw_enable_vport(esw, i, SRIOV_VPORT_EVENTS);

	esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n",
		 esw->enabled_vports);
	return 0;

abort:
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	return err;
}

void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
{
	int i;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "disable SRIOV: active vports(%d)\n",
		 esw->enabled_vports);

	for (i = 0; i < esw->total_vports; i++)
		esw_disable_vport(esw, i);

	esw_destroy_fdb_table(esw);

	/* VPORT 0 (PF) must be enabled back with non-sriov configuration */
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
}

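/*
 * Allocate and initialize the E-Switch: L2 table bitmap, event work
 * queue and the per-vport state array.  Only the PF vport is enabled
 * here; VF vports are enabled when SRIOV is turned on.
 */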
int mlx5_eswitch_init(struct mlx5_core_dev *dev, int total_vports)
{
	int l2_table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
	struct mlx5_eswitch *esw;
	int vport_num;
	int err;

	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
	    MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return 0;

	esw_info(dev,
		 "Total vports %d, l2 table size(%d), per vport: max uc(%d) max mc(%d)\n",
		 total_vports, l2_table_size,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;

	esw->l2_table.bitmap = kcalloc(BITS_TO_LONGS(l2_table_size),
				       sizeof(uintptr_t), GFP_KERNEL);
	if (!esw->l2_table.bitmap) {
		err = -ENOMEM;
		goto abort;
	}
	esw->l2_table.size = l2_table_size;

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	for (vport_num = 0; vport_num < total_vports; vport_num++) {
		struct mlx5_vport *vport = &esw->vports[vport_num];

		vport->vport = vport_num;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
		spin_lock_init(&vport->lock);
		mutex_init(&vport->state_lock);
	}

	esw->total_vports = total_vports;
	esw->enabled_vports = 0;

	dev->priv.eswitch = esw;
	esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
	/* VF Vports will be enabled when SRIOV is enabled */
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager) ||
	    MLX5_CAP_GEN(esw->dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return;

	esw_info(esw->dev, "cleanup\n");
	esw_disable_vport(esw, 0);

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	kfree(esw->l2_table.bitmap);
	kfree(esw->vports);
	kfree(esw);
}

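/*
 * EQ dispatch hook for NIC_VPORT_CHANGE events: queue the vport's
 * change handler on the E-Switch work queue if the vport is enabled.
 */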
void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe)
{
	struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change;
	u16 vport_num = be16_to_cpu(vc_eqe->vport_num);
	struct mlx5_vport *vport;

	if (!esw) {
		printf("mlx5_core: WARN: ""MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n", vport_num);
		return;
	}

	vport = &esw->vports[vport_num];
	spin_lock(&vport->lock);
	if (vport->enabled)
		queue_work(esw->work_queue, &vport->vport_change_handler);
	spin_unlock(&vport->lock);
}

/* Vport Administration */
#define ESW_ALLOWED(esw) \
	(esw && MLX5_CAP_GEN(esw->dev, vport_group_manager) && mlx5_core_is_pf(esw->dev))
#define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports)

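/*
 * Derive an EUI-64-style node GUID from a MAC address by splitting the
 * MAC and inserting 0xff, 0xfe in the middle (note: the universal/local
 * bit is left untouched).  For example, MAC 00:11:22:33:44:55 yields
 * the GUID 0x001122fffe334455.
 */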
static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       int vport, u8 mac[ETH_ALEN])
{
	int err = 0;
	u64 node_guid;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport, node_guid);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_node_guid vport(%d) err=(%d)\n",
			       vport, err);
		return err;
	}

	return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 int vport, int link_state)
{
	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	return mlx5_modify_vport_admin_state(esw->dev,
					     MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
					     vport, link_state);
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  int vport, struct mlx5_esw_vport_info *ivi)
{
	u16 vlan;
	u8 qos;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport))
		return -EINVAL;

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mlx5_query_nic_vport_mac_address(esw->dev, vport, ivi->mac);
	ivi->linkstate = mlx5_query_vport_admin_state(esw->dev,
						      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
						      vport);
	query_esw_vport_cvlan(esw->dev, vport, &vlan, &qos);
	ivi->vlan = vlan;
	ivi->qos = qos;
	ivi->spoofchk = 0;

	return 0;
}

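/*
 * Configure VST (port VLAN) on a vport and refresh its ACL rules.
 * Passing vlan == 0 and qos == 0 removes the port VLAN.  For example,
 * mlx5_eswitch_set_vport_vlan(esw, 1, 100, 0) tags all traffic of
 * VF 0 (vport 1) with VLAN 100, priority 0.
 */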
int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				int vport, u16 vlan, u8 qos)
{
	struct mlx5_vport *evport;
	int err = 0;
	int set = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (!LEGAL_VPORT(esw, vport) || (vlan > 4095) || (qos > 7))
		return -EINVAL;

	if (vlan || qos)
		set = 1;

	evport = &esw->vports[vport];

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set);
	if (err)
		return err;

	mutex_lock(&evport->state_lock);
	evport->vlan = vlan;
	evport->qos = qos;
	if (evport->enabled) {
		esw_vport_ingress_config(esw, evport);
		esw_vport_egress_config(esw, evport);
	}
	mutex_unlock(&evport->state_lock);
	return err;
}