/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
	FDB_FAST_PATH = 0,
	FDB_SLOW_PATH
};
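
/* Install a flow rule in the offloads (switchdev mode) FDB. The rule
 * matches on the source vport of in_rep in the misc parameters, so a
 * representor's traffic can be identified, and forwards to the out_rep
 * vport and/or a flow counter. VLAN push/pop actions are masked out here
 * because they are emulated via per-vport e-switch configuration (see
 * mlx5_eswitch_add_vlan_action() below) rather than programmed per rule.
 */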
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	void *misc;
	int i = 0;

	if (esw->mode != SRIOV_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	/* per flow vlan pop/push is emulated, don't set that into the firmware */
	flow_act.action = attr->action & ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest[i].vport_num = attr->out_rep->vport;
		i++;
	}
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw->dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_counter_alloc;
		}
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter = counter;
		i++;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
				      MLX5_MATCH_MISC_PARAMETERS;
	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_id = attr->mod_hdr_id;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		flow_act.encap_id = attr->encap_id;

	rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
				   spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;

	esw->offloads.num_flows++;
	return rule;

err_add_rule:
	mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
	return rule;
}
void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);
	mlx5_del_flow_rules(rule);
	mlx5_fc_destroy(esw->dev, counter);
	esw->offloads.num_flows--;
}
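
/* VLAN push/pop offload emulation: instead of per-rule VLAN actions, the
 * e-switch applies a global "strip on ingress" policy on all VF vports
 * while any push/pop rule exists (tracked by vlan_push_pop_refcount), and
 * a per-vport VLAN insert for push rules (tracked by the representor's
 * vlan_refcount). The helpers below apply and account for that state.
 */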
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	int vf_vport, err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
		rep = &esw->offloads.vport_reps[vf_vport];
		if (!rep->rep_if[REP_ETH].valid)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->out_rep;

	if (push && in_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	if (pop && out_rep->vport == FDB_UPLINK_VPORT)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	err = esw_add_vlan_action_check(attr, push, pop, fwd);
	if (err)
		return err;

	attr->vlan_handled = false;

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
			vport->vlan_refcount++;
			attr->vlan_handled = true;
		}
		return 0;
	}

	if (!push && !pop)
		return 0;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan, 0,
						    SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = attr->vlan;
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->vlan_handled = true;
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_esw_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	if (!attr->vlan_handled)
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	vport = esw_vlan_action_get_vport(attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (attr->out_rep->vport == FDB_UPLINK_VPORT)
			vport->vlan_refcount--;
		return 0;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		return 0;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	return err;
}
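
/* "Send to vport" rules let a representor netdev inject packets directly
 * to its VF: the rule matches on the PF send queue number (source SQN)
 * plus source vport 0 (the PF/uplink) and forwards to the VF vport. These
 * rules live in the slow path FDB table created further below.
 */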
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}
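
/* The FDB miss rule is a match-all (empty spec) entry in the lowest
 * priority group of the slow path table: packets that hit no offloaded
 * rule are forwarded to vport 0, i.e. handed to the uplink/PF so software
 * (the representors) can process them.
 */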
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport_num = 0;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule = flow_rule;
out:
	kvfree(spec);
	return err;
}
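
/* The offloads FDB is split in two: a "fast path" table, auto-grouped and
 * sized as min(2^log_max_ft_size, max flow counters * number of groups),
 * which holds the offloaded flows, and a "slow path" table (created in
 * esw_create_offloads_fdb_tables() below) holding the send-to-vport rules
 * and the miss rule.
 */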
#define ESW_OFFLOADS_NUM_GROUPS  4

static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int esw_size, err = 0;
	u32 flags = 0;
	u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto out;
	}

	esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
		  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

	esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
			 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

	fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
						  esw_size,
						  ESW_OFFLOADS_NUM_GROUPS, 0,
						  flags);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.fdb = fdb;

out:
	return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
}
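
/* The slow path FDB table is sized for MAX_SQ_NVPORTS send queues per
 * vport plus MAX_PF_SQ PF send queues plus a miss entry, and is carved
 * into two groups: a send-to-vport group matching on source SQN/port and
 * a criteria-less miss group at the end of the table.
 */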
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	int table_size, ix, err = 0;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u32 *flow_group_in;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}

	err = esw_create_offloads_fast_fdb_table(esw);
	if (err)
		goto fast_fdb_err;

	table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 1;

	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.fdb = fdb;

	/* create send-to-vport group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

	ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, 0);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
	mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
	esw_destroy_offloads_fast_fdb_table(esw);
}
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}
static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
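
/* The NIC RX offloads table holds one steering rule per vport, all in a
 * single group matching on the source vport in the misc parameters. It is
 * sized num_vfs + 2, presumably to leave room for the PF and uplink slots
 * in addition to the VF vports.
 */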
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	struct mlx5_priv *priv = &esw->dev->priv;
	u32 *flow_group_in;
	void *match_criteria, *misc;
	int err = 0;
	int nvports = priv->sriov.num_vfs + 2;

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
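
/* Steer traffic received from a given vport to the TIR (and thus the RX
 * queues) of its representor netdev, by matching on the source vport in
 * the misc parameters of the NIC RX offloads table.
 */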
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, vport);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = tirn;

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
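
/* Switching between legacy and offloads modes is implemented as a full
 * e-switch teardown and re-initialization with the new mode; on failure
 * the code attempts to fall back to the previous mode. Typically reached
 * via devlink, e.g. (device address is illustrative):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 mode switchdev
 */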
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	if (esw->mode != SRIOV_LEGACY) {
		esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
		return -EINVAL;
	}

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
	}
	if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
		if (mlx5_eswitch_inline_mode_get(esw,
						 num_vfs,
						 &esw->offloads.inline_mode)) {
			esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
			esw_warn(esw->dev, "Inline mode is different between vports\n");
		}
	}
	return err;
}
void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
	kfree(esw->offloads.vport_reps);
}
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
	int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_esw_offload *offloads;
	struct mlx5_eswitch_rep *rep;
	u8 hw_id[ETH_ALEN];
	int vport;

	esw->offloads.vport_reps = kcalloc(total_vfs,
					   sizeof(struct mlx5_eswitch_rep),
					   GFP_KERNEL);
	if (!esw->offloads.vport_reps)
		return -ENOMEM;

	offloads = &esw->offloads;
	mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

	for (vport = 0; vport < total_vfs; vport++) {
		rep = &offloads->vport_reps[vport];

		rep->vport = vport;
		ether_addr_copy(rep->hw_id, hw_id);
	}

	offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

	return 0;
}
static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
					  u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;

	for (vport = nvports - 1; vport >= 0; vport--) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		rep->rep_if[rep_type].unload(rep);
	}
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = NUM_REP_TYPES;

	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
}
static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
				       u8 rep_type)
{
	struct mlx5_eswitch_rep *rep;
	int vport;
	int err;

	for (vport = 0; vport < nvports; vport++) {
		rep = &esw->offloads.vport_reps[vport];
		if (!rep->rep_if[rep_type].valid)
			continue;

		err = rep->rep_if[rep_type].load(esw->dev, rep);
		if (err)
			goto err_reps;
	}

	return 0;

err_reps:
	esw_offloads_unload_reps_type(esw, vport, rep_type);
	return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
	u8 rep_type = 0;
	int err;

	for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
		err = esw_offloads_load_reps_type(esw, nvports, rep_type);
		if (err)
			goto err_reps;
	}

	return err;

err_reps:
	while (rep_type-- > 0)
		esw_offloads_unload_reps_type(esw, nvports, rep_type);
	return err;
}
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
	int err;

	/* disable PF RoCE so missed packets don't go through RoCE steering */
	mlx5_dev_list_lock();
	mlx5_remove_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	err = esw_create_offloads_fdb_tables(esw, nvports);
	if (err)
		goto create_fdb_err;

	err = esw_create_offloads_table(esw);
	if (err)
		goto create_ft_err;

	err = esw_create_vport_rx_group(esw);
	if (err)
		goto create_fg_err;

	err = esw_offloads_load_reps(esw, nvports);
	if (err)
		goto err_reps;

	return 0;

err_reps:
	esw_destroy_vport_rx_group(esw);

create_fg_err:
	esw_destroy_offloads_table(esw);

create_ft_err:
	esw_destroy_offloads_fdb_tables(esw);

create_fdb_err:
	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
	int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

	mlx5_eswitch_disable_sriov(esw);
	err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
	if (err) {
		esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
		err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
		if (err1)
			esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
	}

	/* enable back PF RoCE */
	mlx5_dev_list_lock();
	mlx5_add_dev_by_protocol(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
	mlx5_dev_list_unlock();

	return err;
}
void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
	esw_offloads_unload_reps(esw, nvports);
	esw_destroy_vport_rx_group(esw);
	esw_destroy_offloads_table(esw);
	esw_destroy_offloads_fdb_tables(esw);
}
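
/* Translation helpers between the devlink uAPI enums and the driver's
 * internal mode/inline-mode values. Unknown values are rejected with
 * -EINVAL rather than mapped to a default.
 */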
static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_MODE_LEGACY:
		*mlx5_mode = SRIOV_LEGACY;
		break;
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		*mlx5_mode = SRIOV_OFFLOADS;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
	switch (mlx5_mode) {
	case SRIOV_LEGACY:
		*mode = DEVLINK_ESWITCH_MODE_LEGACY;
		break;
	case SRIOV_OFFLOADS:
		*mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
	switch (mode) {
	case DEVLINK_ESWITCH_INLINE_MODE_NONE:
		*mlx5_mode = MLX5_INLINE_MODE_NONE;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_LINK:
		*mlx5_mode = MLX5_INLINE_MODE_L2;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
		*mlx5_mode = MLX5_INLINE_MODE_IP;
		break;
	case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
		*mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
	switch (mlx5_mode) {
	case MLX5_INLINE_MODE_NONE:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
		break;
	case MLX5_INLINE_MODE_L2:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
		break;
	case MLX5_INLINE_MODE_IP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
		break;
	case MLX5_INLINE_MODE_TCP_UDP:
		*mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);

	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (dev->priv.eswitch->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	return 0;
}
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	u16 cur_mlx5_mode, mlx5_mode = 0;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	cur_mlx5_mode = dev->priv.eswitch->mode;

	if (esw_mode_from_devlink(mode, &mlx5_mode))
		return -EINVAL;

	if (cur_mlx5_mode == mlx5_mode)
		return 0;

	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return esw_offloads_start(dev->priv.eswitch);
	else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
		return esw_offloads_stop(dev->priv.eswitch);
	else
		return -EINVAL;
}
int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
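
/* The min inline mode controls how many packet headers the driver must
 * copy inline into the TX descriptor (WQE) so the e-switch can parse and
 * steer the packet: none, L2, IP, or TCP/UDP headers. It can only be
 * changed here when the device reports MLX5_CAP_INLINE_MODE_VPORT_CONTEXT
 * (per-vport configurable) and no offloaded flows are installed.
 * Typically set via devlink, e.g. (device address is illustrative):
 *
 *   devlink dev eswitch set pci/0000:06:00.0 inline-mode transport
 */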
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err, vport;
	u8 mlx5_mode;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
			return 0;
		/* fall through */
	case MLX5_CAP_INLINE_MODE_L2:
		esw_warn(dev, "Inline mode can't be set\n");
		return -EOPNOTSUPP;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		break;
	}

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set inline mode when flows are configured\n");
		return -EOPNOTSUPP;
	}

	err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
	if (err)
		goto out;

	for (vport = 1; vport < esw->enabled_vports; vport++) {
		err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
		if (err) {
			esw_warn(dev, "Failed to set min inline on vport %d\n",
				 vport);
			goto revert_inline_mode;
		}
	}

	esw->offloads.inline_mode = mlx5_mode;
	return 0;

revert_inline_mode:
	while (--vport > 0)
		mlx5_modify_nic_vport_min_inline(dev,
						 vport,
						 esw->offloads.inline_mode);
out:
	return err;
}
int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}
int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	int vport;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	for (vport = 1; vport <= nvfs; vport++) {
		mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
		if (vport > 1 && prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
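
/* Encap mode controls whether the fast path FDB table is created with
 * MLX5_FLOW_TABLE_TUNNEL_EN (see esw_create_offloads_fast_fdb_table()),
 * so flipping it while in offloads mode requires destroying and
 * re-creating that table; hence the restriction that no flows may be
 * configured at the time of the change.
 */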
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
	    (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
	     !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
		return -EOPNOTSUPP;

	if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
		return -EOPNOTSUPP;

	if (esw->mode == SRIOV_LEGACY) {
		esw->offloads.encap = encap;
		return 0;
	}

	if (esw->offloads.encap == encap)
		return 0;

	if (esw->offloads.num_flows > 0) {
		esw_warn(dev, "Can't set encapsulation when flows are configured\n");
		return -EOPNOTSUPP;
	}

	esw_destroy_offloads_fast_fdb_table(esw);

	esw->offloads.encap = encap;
	err = esw_create_offloads_fast_fdb_table(esw);
	if (err) {
		esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
		esw->offloads.encap = !encap;
		(void)esw_create_offloads_fast_fdb_table(esw);
	}
	return err;
}
int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	int err;

	err = mlx5_devlink_eswitch_check(devlink);
	if (err)
		return err;

	*encap = esw->offloads.encap;
	return 0;
}
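
/* Representor registration API. Protocol drivers (e.g. mlx5e, mlx5_ib)
 * register per-vport callbacks that the e-switch invokes when entering or
 * leaving offloads mode. A caller would typically do something like this
 * (illustrative sketch with hypothetical callbacks, not taken from this
 * file):
 *
 *	struct mlx5_eswitch_rep_if rep_if = {};
 *
 *	rep_if.load = my_rep_load;
 *	rep_if.unload = my_rep_unload;
 *	rep_if.get_proto_dev = my_rep_get_proto_dev;
 *	mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
 */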
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
				     int vport_index,
				     struct mlx5_eswitch_rep_if *__rep_if,
				     u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep_if *rep_if;

	rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

	rep_if->load   = __rep_if->load;
	rep_if->unload = __rep_if->unload;
	rep_if->get_proto_dev = __rep_if->get_proto_dev;
	rep_if->priv = __rep_if->priv;

	rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);
void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
				       int vport_index, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[vport_index];

	if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
		rep->rep_if[rep_type].unload(rep);

	rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);
#define UPLINK_REP_INDEX 0

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	rep = &offloads->vport_reps[UPLINK_REP_INDEX];
	return rep->rep_if[rep_type].priv;
}
void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
				 int vport,
				 u8 rep_type)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;
	struct mlx5_eswitch_rep *rep;

	if (vport == FDB_UPLINK_VPORT)
		vport = UPLINK_REP_INDEX;

	rep = &offloads->vport_reps[vport];

	if (rep->rep_if[rep_type].valid &&
	    rep->rep_if[rep_type].get_proto_dev)
		return rep->rep_if[rep_type].get_proto_dev(rep);
	return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
	return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
						int vport)
{
	return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);