/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};
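/* Build an offloaded fast-path FDB rule: translate attr->action into
 * flow-steering actions (forward to the out_rep vport and/or a flow
 * counter) and match on the in_rep source vport via misc parameters.
 * VLAN push/pop is deliberately masked out here; it is emulated through
 * per-vport VLAN settings in mlx5_eswitch_add_vlan_action() below.
 */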
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		mlx5_fc_destroy(esw->dev, counter);

	return rule;
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
}
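/* Apply (or clear, when val == 0) the VLAN strip policy on every enabled
 * VF vport. Invoked when the first VLAN push/pop rule is installed and
 * when the last one is removed.
 */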
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
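/* Reject VLAN action combinations the e-switch cannot emulate: push/pop
 * without forwarding, pushing on traffic ingressing from the uplink,
 * popping on traffic egressing to the uplink, and conflicting VLAN ids
 * pushed on behalf of the same vport.
 */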
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep  = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
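/* Emulate per-flow VLAN push/pop with per-vport VLAN insert/strip: the
 * first push for a vport programs its VLAN, later identical pushes only
 * take a reference (vlan_refcount); VF --> wire forwards without a push
 * are refcounted too so the vport VLAN cannot change underneath them.
 */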
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}

		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;

		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
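/* Install a "send-to-vport" re-inject rule: packets that a representor
 * transmits from one of its SQs (matched on source_sqn, with source
 * vport 0) are steered back out through the VF vport it represents.
 */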
static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
void mlx5_eswitch_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5_esw_sq *esw_sq, *tmp;

	if (esw->mode != SRIOV_OFFLOADS)
		return;

	list_for_each_entry_safe(esw_sq, tmp, &rep->vport_sqs_list, list) {
		mlx5_del_flow_rules(esw_sq->send_to_vport_rule);
		list_del(&esw_sq->list);
		kfree(esw_sq);
	}
}
int mlx5_eswitch_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u16 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_esw_sq *esw_sq;
	int err;
	int i;

	if (esw->mode != SRIOV_OFFLOADS)
		return 0;

	for (i = 0; i < sqns_num; i++) {
		esw_sq = kzalloc(sizeof(*esw_sq), GFP_KERNEL);
		if (!esw_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(esw_sq);
			goto out_err;
		}
		esw_sq->send_to_vport_rule = flow_rule;
		list_add(&esw_sq->list, &rep->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5_eswitch_sqs2vport_stop(esw, rep);
	return err;
}
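/* The FDB miss rule matches nothing (empty spec) and therefore catches
 * every packet no other rule claimed, forwarding it to the PF vport
 * (vport 0) so the host can deliver it to the right representor.
 */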
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "FDB: Failed to alloc match parameters\n");
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}
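/* The offloads FDB is split in two: an auto-grouped fast-path table that
 * holds the offloaded flows (sized by the smaller of the max flow-table
 * size and max_flow_counter * ESW_OFFLOADS_NUM_GROUPS), and a slow-path
 * table with a send-to-vport group spanning nvports + MAX_PF_SQ entries
 * plus a miss group at the end.
 */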
#define MAX_PF_SQ 256
#define ESW_OFFLOADS_NUM_GROUPS  4

static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int table_size, ix, esw_size, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	void *match_criteria;
	u32 flags = 0;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
	    MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto fast_fdb_err;
	}
	esw->fdb_table.fdb = fdb;

	table_size = nvports + MAX_PF_SQ + 1;
	fdb = mlx5_create_flow_table(root_ns, FDB_SLOW_PATH, table_size, 0, 0);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_table(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Table\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_namespace *ns;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_core_dev *dev = esw->dev;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_offloads = mlx5_create_flow_table(ns, 0, dev->priv.sriov.num_vfs + 2, 0, 0);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = mlx5_vzalloc(inlen);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
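/* Steer traffic received on behalf of a vport (matched on source port in
 * the misc parameters) from the offloads table into the given TIR, i.e.
 * into the RX queues of that vport's representor netdevice.
 */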
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		esw_warn(esw->dev, "Failed to alloc match parameters\n");
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
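/* devlink mode transitions: switching between legacy and offloads
 * (switchdev) mode is done by tearing the e-switch down and bringing it
 * back up in the requested mode; on failure we try to fall back to the
 * mode we started from.
 */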
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}
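/* Bring the offloads pipeline up: create the fast/slow path FDB tables,
 * the offloads (RX) table and its vport group, then load the registered
 * representors. PF RoCE is removed first so missed packets cannot be
 * claimed by RoCE steering, and is restored on failure or teardown.
 */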
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_table(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;

		err = rep->load(esw, rep);
		if (err)
			goto err_reps;
	}
	return 0;

err_reps:
	for (vport--; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_table(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->valid)
			continue;
		rep->unload(esw, rep);
	}

	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_table(esw);
}
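/* Translate between the devlink uapi enums and the mlx5-internal e-switch
 * mode and minimum-inline constants; unknown values yield -EINVAL.
 */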
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev;
	u16 cur_mlx5_mode, mlx5_mode = 0;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (cur_mlx5_mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);

	return -EINVAL;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev;

	dev = devlink_priv(devlink);

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
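/* Setting the minimum inline mode is only supported when the device keeps
 * it in the per-vport context; the mode is applied to every enabled VF
 * vport and rolled back on partial failure so vports never end up with
 * mixed settings.
 */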
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int num_vports = esw->enabled_vports;
	u8 mlx5_mode;
	int err;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < num_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	struct mlx5_core_dev *dev = esw->dev;
	int vport;
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	if (MLX5_CAP_ETH(dev, wqe_inline_mode) !=
	    MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		return -EOPNOTSUPP;

	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

	*mode = mlx5_mode;
	return 0;
}
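/* Representor registration: a caller (e.g. the mlx5e netdev driver) hands
 * in load/unload callbacks and identity for a vport; unregistering while
 * the e-switch is in offloads mode with the vport enabled unloads the rep
 * first.
 */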
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep *__rep)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	memset(rep, 0, sizeof(*rep));

	rep->load   = __rep->load;
	rep->unload = __rep->unload;
	rep->vport  = __rep->vport;
	rep->netdev = __rep->netdev;
	ether_addr_copy(rep->hw_id, __rep->hw_id);

	INIT_LIST_HEAD(&rep->vport_sqs_list);
	rep->valid = true;
}
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->unload(esw, rep);

	rep->valid = false;
}
struct net_device *mlx5_eswitch_get_uplink_netdev(struct mlx5_eswitch *esw)
{
#define UPLINK_REP_INDEX 0
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->netdev;
}