/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};

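/* Add an offloaded flow rule to the fast path FDB table. The rule matches
 * on the in_rep source vport (plus whatever headers the caller put in the
 * spec) and, per attr->action, forwards to the out_rep vport and/or counts
 * the traffic with a flow counter.
 */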
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	flow_act.action = attr->action;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		mlx5_fc_destroy(esw->dev, counter);

	return rule;
}

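/* Apply (or clear, val == 0) the vlan strip setting on every enabled VF
 * vport; used as a global policy when vlan push/pop rules are offloaded.
 */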
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

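/* Pick the vport whose vlan state is tracked for this rule: the ingress
 * rep when pushing, the egress rep when popping.
 */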
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}

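/* Reject vlan action combinations the offload path can't honor: push/pop
 * without forwarding, push from or pop toward the uplink, and conflicting
 * vlan values pushed on the same ingress vport.
 */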
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}

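/* Account for a rule with vlan push/pop actions: the first such rule turns
 * on the global vlan pop policy, and vlan insertion is configured on the
 * ingress vport, both guarded by reference counts so rules share state.
 */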
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}

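/* Undo the accounting done by mlx5_eswitch_add_vlan_action when the rule
 * is deleted; the global vlan pop policy is removed with the last rule.
 */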
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}

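/* Add a slow path FDB rule matching traffic the PF sends from one of a
 * representor's send queues (by source SQN), forwarding it to the
 * represented vport.
 */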
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}

void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}

int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}

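/* Install the catch-all rule of the slow path FDB table: packets that miss
 * all offloaded rules are forwarded to vport 0 (the PF), where the
 * representor netdevices hand them to the host stack.
 */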
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}

#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS  4

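/* Create the two-level offloads FDB: an auto-grouped fast path table for
 * offloaded flows (with tunnel encap/decap enabled when the device caps
 * allow it), and a slow path table holding a send-to-vport group of
 * nvports + MAX_PF_SQ entries followed by a two-entry miss group.
 */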
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	int table_size, ix, err = 0;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  ESW_OFFLOADS_NUM_ENTRIES,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}

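/* Create the single flow group of the offloads (rx) table, matching only
 * on the misc source_port field; it is sized for one rule per VF vport
 * plus two more for the PF and uplink vports.
 */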
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}

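/* Steer traffic arriving from the given vport to the TIR of its
 * representor netdevice.
 */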
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}

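/* Switch from legacy to offloads mode: SRIOV is torn down and re-enabled
 * with the eswitch in SRIOV_OFFLOADS mode; on failure, legacy mode is
 * restored on a best-effort basis.
 */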
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	return err;
}

int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		return err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}
	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);
	return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

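/* devlink eswitch mode callbacks; from user space the mode is flipped
 * with e.g. "devlink dev eswitch set pci/0000:06:00.0 mode switchdev"
 * (the PCI address here is illustrative).
 */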
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);

	return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}

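/* Register a representor for a vport: its load/unload callbacks are
 * invoked when the eswitch enters/leaves offloads mode.
 */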
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load      = __rep->load;
	rep->unload    = __rep->unload;
	rep->vport     = __rep->vport;
	rep->priv_data = __rep->priv_data;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}