/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"

enum {
        FDB_FAST_PATH = 0,
        FDB_SLOW_PATH
};
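
/* Install one offloaded flow as a rule in the fast path FDB table:
 * match on the source vport of the ingress representor and apply the
 * actions encoded in @attr - forward to the egress rep's vport, flow
 * counter, VLAN push/pop, header modify and encap/decap.
 */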
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_spec *spec,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        void *misc;
        int i = 0;

        if (esw->mode != SRIOV_OFFLOADS)
                return ERR_PTR(-EOPNOTSUPP);

        flow_act.action = attr->action;
        /* if per flow vlan pop/push is emulated, don't set that into the firmware */
        if (!mlx5_eswitch_vlan_actions_supported(esw->dev))
                flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
                                     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
                flow_act.vlan.ethtype = ntohs(attr->vlan_proto);
                flow_act.vlan.vid = attr->vlan_vid;
                flow_act.vlan.prio = attr->vlan_prio;
        }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
                dest[i].vport_num = attr->out_rep->vport;
                i++;
        }

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(esw->dev, true);
                if (IS_ERR(counter)) {
                        rule = ERR_CAST(counter);
                        goto err_counter_alloc;
                }
                dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[i].counter = counter;
                i++;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS |
                                      MLX5_MATCH_MISC_PARAMETERS;
        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_DECAP)
                spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                flow_act.modify_id = attr->mod_hdr_id;

        if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                flow_act.encap_id = attr->encap_id;

        rule = mlx5_add_flow_rules((struct mlx5_flow_table *)esw->fdb_table.fdb,
                                   spec, &flow_act, dest, i);
        if (IS_ERR(rule))
                goto err_add_rule;
        else
                esw->offloads.num_flows++;

        return rule;

err_add_rule:
        mlx5_fc_destroy(esw->dev, counter);
err_counter_alloc:
        return rule;
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
                                struct mlx5_flow_handle *rule,
                                struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(rule);
        mlx5_del_flow_rules(rule);
        mlx5_fc_destroy(esw->dev, counter);
        esw->offloads.num_flows--;
}
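
/* When per-flow VLAN pop is emulated, apply (or remove) a VLAN strip
 * policy on the e-switch vport context of every enabled VF vport.
 */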
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
        struct mlx5_eswitch_rep *rep;
        int vf_vport, err = 0;

        esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
        for (vf_vport = 1; vf_vport < esw->enabled_vports; vf_vport++) {
                rep = &esw->offloads.vport_reps[vf_vport];
                if (!rep->rep_if[REP_ETH].valid)
                        continue;

                err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
                if (err)
                        goto out;
        }

out:
        return err;
}
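
/* The emulated VLAN action is configured on a vport context: the
 * ingress rep's vport for push, the egress rep's vport for pop.
 */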
static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

        in_rep  = attr->in_rep;
        out_rep = attr->out_rep;

        if (push)
                vport = in_rep;
        else if (pop)
                vport = out_rep;
        else
                vport = in_rep;

        return vport;
}

static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
                                     bool push, bool pop, bool fwd)
{
        struct mlx5_eswitch_rep *in_rep, *out_rep;

        if ((push || pop) && !fwd)
                goto out_notsupp;

        in_rep  = attr->in_rep;
        out_rep = attr->out_rep;

        if (push && in_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        if (pop && out_rep->vport == FDB_UPLINK_VPORT)
                goto out_notsupp;

        /* vport has vlan push configured, can't offload VF --> wire rules without it */
        if (!push && !pop && fwd)
                if (in_rep->vlan && out_rep->vport == FDB_UPLINK_VPORT)
                        goto out_notsupp;

        /* protects against (1) setting rules with different vlans to push and
         * (2) setting rules without vlans (attr->vlan_vid == 0) together with
         *     rules that push a vlan (!= 0)
         */
        if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid))
                goto out_notsupp;

        return 0;

out_notsupp:
        return -EOPNOTSUPP;
}
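
/* Emulate per-flow VLAN push/pop on HW that has no native support for
 * it: the first VLAN rule turns on a global strip policy, push is
 * realized through the vport context of the relevant representor, and
 * refcounts track how many offloaded rules depend on each setting.
 */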
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev))
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        err = esw_add_vlan_action_check(attr, push, pop, fwd);
        if (err)
                return err;

        attr->vlan_handled = false;

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT) {
                        vport->vlan_refcount++;
                        attr->vlan_handled = true;
                }

                return 0;
        }

        if (!push && !pop)
                return 0;

        if (!(offloads->vlan_push_pop_refcount)) {
                /* it's the 1st vlan rule, apply global vlan pop policy */
                err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }
        offloads->vlan_push_pop_refcount++;

        if (push) {
                if (vport->vlan_refcount)
                        goto skip_set_push;

                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, attr->vlan_vid, 0,
                                                    SET_VLAN_INSERT | SET_VLAN_STRIP);
                if (err)
                        goto out;
                vport->vlan = attr->vlan_vid;
skip_set_push:
                vport->vlan_refcount++;
        }
out:
        if (!err)
                attr->vlan_handled = true;
        return err;
}

int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
                                 struct mlx5_esw_flow_attr *attr)
{
        struct offloads_fdb *offloads = &esw->fdb_table.offloads;
        struct mlx5_eswitch_rep *vport = NULL;
        bool push, pop, fwd;
        int err = 0;

        /* nop if we're on the vlan push/pop non emulation mode */
        if (mlx5_eswitch_vlan_actions_supported(esw->dev))
                return 0;

        if (!attr->vlan_handled)
                return 0;

        push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
        pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
        fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

        vport = esw_vlan_action_get_vport(attr, push, pop);

        if (!push && !pop && fwd) {
                /* tracks VF --> wire rules without vlan push action */
                if (attr->out_rep->vport == FDB_UPLINK_VPORT)
                        vport->vlan_refcount--;

                return 0;
        }

        if (push) {
                vport->vlan_refcount--;
                if (vport->vlan_refcount)
                        goto skip_unset_push;

                vport->vlan = 0;
                err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
                                                    0, 0, SET_VLAN_STRIP);
                if (err)
                        goto out;
        }

skip_unset_push:
        offloads->vlan_push_pop_refcount--;
        if (offloads->vlan_push_pop_refcount)
                return 0;

        /* no more vlan rules, stop global vlan pop policy */
        err = esw_set_global_vlan_pop(esw, 0);

out:
        return err;
}
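
/* "Send to vport" rule: steer packets that originate from one of the
 * PF's own send queues (matched by source SQN, source vport 0) from
 * the slow path FDB straight to the destination vport, bypassing the
 * miss rules.
 */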
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
        MLX5_SET(fte_match_set_misc, misc, source_port, 0x0); /* source vport is 0 */

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = vport;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule))
                esw_warn(esw->dev, "FDB: Failed to add send to vport rule err %ld\n", PTR_ERR(flow_rule));
out:
        kvfree(spec);
        return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);

void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
        mlx5_del_flow_rules(rule);
}
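
/* FDB miss rules: packets that match no offloaded flow are forwarded
 * to vport 0 (the PF/uplink). Two rules share one flow group that
 * masks the multicast bit of the DMAC, so unicast and multicast
 * misses are handled by separate rules.
 */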
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule = NULL;
        struct mlx5_flow_spec *spec;
        void *headers_c;
        void *headers_v;
        int err = 0;
        u8 *dmac_c;
        u8 *dmac_v;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                err = -ENOMEM;
                goto out;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                 outer_headers);
        dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
                              outer_headers.dmac_47_16);
        dmac_c[0] = 0x01;

        dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
        dest.vport_num = 0;
        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_uni = flow_rule;

        headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                 outer_headers);
        dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
                              outer_headers.dmac_47_16);
        dmac_v[0] = 0x01;
        flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.fdb, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
                mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
                goto out;
        }

        esw->fdb_table.offloads.miss_rule_multi = flow_rule;

out:
        kvfree(spec);
        return err;
}
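
/* The fast path FDB holds the offloaded flow rules. Its size is the
 * smaller of the max flow table size the device supports and the
 * number of flow counters it can spread over the auto-grouped
 * ESW_OFFLOADS_NUM_GROUPS flow groups.
 */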
#define ESW_OFFLOADS_NUM_GROUPS  4

static int esw_create_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        int esw_size, err = 0;
        u32 flags = 0;
        u32 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
                                MLX5_CAP_GEN(dev, max_flow_counter_15_0);

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto out;
        }

        esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
                  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
                  max_flow_counter, ESW_OFFLOADS_NUM_GROUPS);

        esw_size = min_t(int, max_flow_counter * ESW_OFFLOADS_NUM_GROUPS,
                         1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

        if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
                flags |= MLX5_FLOW_TABLE_TUNNEL_EN;

        fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
                                                  esw_size,
                                                  ESW_OFFLOADS_NUM_GROUPS, 0,
                                                  flags);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create Fast path FDB Table err %d\n", err);
                goto out;
        }
        esw->fdb_table.fdb = fdb;

out:
        return err;
}

static void esw_destroy_offloads_fast_fdb_table(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
}
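
/* The slow path FDB is sized for MAX_SQ_NVPORTS send-to-vport rules
 * per VF plus MAX_PF_SQ for the PF, with two extra entries for the
 * unicast and multicast miss rules.
 */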
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_namespace *root_ns;
        struct mlx5_flow_table *fdb = NULL;
        int table_size, ix, err = 0;
        struct mlx5_flow_group *g;
        void *match_criteria;
        u32 *flow_group_in;
        u8 *dmac;

        esw_debug(esw->dev, "Create offloads FDB Tables\n");
        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
        if (!root_ns) {
                esw_warn(dev, "Failed to get FDB flow namespace\n");
                err = -EOPNOTSUPP;
                goto ns_err;
        }

        err = esw_create_offloads_fast_fdb_table(esw);
        if (err)
                goto fast_fdb_err;

        table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2;

        ft_attr.max_fte = table_size;
        ft_attr.prio = FDB_SLOW_PATH;

        fdb = mlx5_create_flow_table(root_ns, &ft_attr);
        if (IS_ERR(fdb)) {
                err = PTR_ERR(fdb);
                esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
                goto slow_fdb_err;
        }
        esw->fdb_table.offloads.fdb = fdb;

        /* create send-to-vport group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
        MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);

        ix = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ;
        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
                goto send_vport_err;
        }
        esw->fdb_table.offloads.send_to_vport_grp = g;

        /* create miss group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_OUTER_HEADERS);
        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
                                      match_criteria);
        dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
                            outer_headers.dmac_47_16);
        dmac[0] = 0x01;

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix + 2);

        g = mlx5_create_flow_group(fdb, flow_group_in);
        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
                goto miss_err;
        }
        esw->fdb_table.offloads.miss_grp = g;

        err = esw_add_fdb_miss_rule(esw);
        if (err)
                goto miss_rule_err;

        return 0;

miss_rule_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
slow_fdb_err:
        mlx5_destroy_flow_table(esw->fdb_table.fdb);
fast_fdb_err:
ns_err:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
        if (!esw->fdb_table.fdb)
                return;

        esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
        mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
        mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

        mlx5_destroy_flow_table(esw->fdb_table.offloads.fdb);
        esw_destroy_offloads_fast_fdb_table(esw);
}
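
/* The offloads table holds the vport rx rules: one entry per possible
 * vport (num_vfs + 2), each forwarding matched traffic to a TIR (see
 * mlx5_eswitch_create_vport_rx_rule() below).
 */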
static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_flow_table_attr ft_attr = {};
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_flow_table *ft_offloads;
        struct mlx5_flow_namespace *ns;
        int err = 0;

        ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
        if (!ns) {
                esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
                return -EOPNOTSUPP;
        }

        ft_attr.max_fte = dev->priv.sriov.num_vfs + 2;

        ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
        if (IS_ERR(ft_offloads)) {
                err = PTR_ERR(ft_offloads);
                esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
                return err;
        }

        esw->offloads.ft_offloads = ft_offloads;
        return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;

        mlx5_destroy_flow_table(offloads->ft_offloads);
}
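
/* A single flow group spans the whole offloads table; its rules
 * differ only in the misc source_port they match on.
 */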
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
        int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
        struct mlx5_flow_group *g;
        struct mlx5_priv *priv = &esw->dev->priv;
        u32 *flow_group_in;
        void *match_criteria, *misc;
        int err = 0;
        int nvports = priv->sriov.num_vfs + 2;

        flow_group_in = kvzalloc(inlen, GFP_KERNEL);
        if (!flow_group_in)
                return -ENOMEM;

        /* create vport rx group */
        memset(flow_group_in, 0, inlen);
        MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
                 MLX5_MATCH_MISC_PARAMETERS);

        match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
        misc = MLX5_ADDR_OF(fte_match_param, match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
        MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

        g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);

        if (IS_ERR(g)) {
                err = PTR_ERR(g);
                mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
                goto out;
        }

        esw->offloads.vport_rx_group = g;
out:
        kvfree(flow_group_in);
        return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
        mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
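
/* Steer packets whose e-switch source is @vport to the given TIR, so
 * that they show up on the corresponding representor netdev.
 */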
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
        struct mlx5_flow_act flow_act = {0};
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_handle *flow_rule;
        struct mlx5_flow_spec *spec;
        void *misc;

        spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
        if (!spec) {
                flow_rule = ERR_PTR(-ENOMEM);
                goto out;
        }

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
        MLX5_SET(fte_match_set_misc, misc, source_port, vport);

        misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
        MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

        spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
        dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
        dest.tir_num = tirn;

        flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
                                        &flow_act, &dest, 1);
        if (IS_ERR(flow_rule)) {
                esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
                goto out;
        }

out:
        kvfree(spec);
        return flow_rule;
}
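
/* Switching to offloads (switchdev) mode is a full e-switch restart:
 * disable SRIOV in legacy mode and re-enable it in offloads mode,
 * falling back to legacy mode if that fails.
 */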
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        if (esw->mode != SRIOV_LEGACY) {
                esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
                return -EINVAL;
        }

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
        }
        if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
                if (mlx5_eswitch_inline_mode_get(esw,
                                                 num_vfs,
                                                 &esw->offloads.inline_mode)) {
                        esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
                        esw_warn(esw->dev, "Inline mode is different between vports\n");
                }
        }
        return err;
}

void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
{
        kfree(esw->offloads.vport_reps);
}
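
/* Allocate one representor slot per possible vport; slot 0 stands for
 * the uplink and the others for the VF vports, all sharing the PF's
 * HW MAC address.
 */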
int esw_offloads_init_reps(struct mlx5_eswitch *esw)
{
        int total_vfs = MLX5_TOTAL_VPORTS(esw->dev);
        struct mlx5_core_dev *dev = esw->dev;
        struct mlx5_esw_offload *offloads;
        struct mlx5_eswitch_rep *rep;
        u8 hw_id[ETH_ALEN];
        int vport;

        esw->offloads.vport_reps = kcalloc(total_vfs,
                                           sizeof(struct mlx5_eswitch_rep),
                                           GFP_KERNEL);
        if (!esw->offloads.vport_reps)
                return -ENOMEM;

        offloads = &esw->offloads;
        mlx5_query_nic_vport_mac_address(dev, 0, hw_id);

        for (vport = 0; vport < total_vfs; vport++) {
                rep = &offloads->vport_reps[vport];

                rep->vport = vport;
                ether_addr_copy(rep->hw_id, hw_id);
        }

        offloads->vport_reps[0].vport = FDB_UPLINK_VPORT;

        return 0;
}

static void esw_offloads_unload_reps_type(struct mlx5_eswitch *esw, int nvports,
                                          u8 rep_type)
{
        struct mlx5_eswitch_rep *rep;
        int vport;

        for (vport = nvports - 1; vport >= 0; vport--) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->rep_if[rep_type].valid)
                        continue;

                rep->rep_if[rep_type].unload(rep);
        }
}

static void esw_offloads_unload_reps(struct mlx5_eswitch *esw, int nvports)
{
        u8 rep_type = NUM_REP_TYPES;

        while (rep_type-- > 0)
                esw_offloads_unload_reps_type(esw, nvports, rep_type);
}

static int esw_offloads_load_reps_type(struct mlx5_eswitch *esw, int nvports,
                                       u8 rep_type)
{
        struct mlx5_eswitch_rep *rep;
        int vport;
        int err;

        for (vport = 0; vport < nvports; vport++) {
                rep = &esw->offloads.vport_reps[vport];
                if (!rep->rep_if[rep_type].valid)
                        continue;

                err = rep->rep_if[rep_type].load(esw->dev, rep);
                if (err)
                        goto err_reps;
        }

        return 0;

err_reps:
        esw_offloads_unload_reps_type(esw, vport, rep_type);
        return err;
}

static int esw_offloads_load_reps(struct mlx5_eswitch *esw, int nvports)
{
        u8 rep_type = 0;
        int err;

        for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
                err = esw_offloads_load_reps_type(esw, nvports, rep_type);
                if (err)
                        goto err_reps;
        }

        return err;

err_reps:
        while (rep_type-- > 0)
                esw_offloads_unload_reps_type(esw, nvports, rep_type);
        return err;
}
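
/* Entry point when the e-switch is enabled in offloads mode: create
 * the FDB tables, the offloads table with its rx group, then load all
 * registered representors.
 */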
int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
{
        int err;

        err = esw_create_offloads_fdb_tables(esw, nvports);
        if (err)
                return err;

        err = esw_create_offloads_table(esw);
        if (err)
                goto create_ft_err;

        err = esw_create_vport_rx_group(esw);
        if (err)
                goto create_fg_err;

        err = esw_offloads_load_reps(esw, nvports);
        if (err)
                goto err_reps;

        return 0;

err_reps:
        esw_destroy_vport_rx_group(esw);

create_fg_err:
        esw_destroy_offloads_table(esw);

create_ft_err:
        esw_destroy_offloads_fdb_tables(esw);

        return err;
}

static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
        int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;

        mlx5_eswitch_disable_sriov(esw);
        err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
        if (err) {
                esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
                err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
                if (err1)
                        esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
        }

        /* enable back PF RoCE */
        mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);

        return err;
}

void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
{
        esw_offloads_unload_reps(esw, nvports);
        esw_destroy_vport_rx_group(esw);
        esw_destroy_offloads_table(esw);
        esw_destroy_offloads_fdb_tables(esw);
}

static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_MODE_LEGACY:
                *mlx5_mode = SRIOV_LEGACY;
                break;
        case DEVLINK_ESWITCH_MODE_SWITCHDEV:
                *mlx5_mode = SRIOV_OFFLOADS;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
{
        switch (mlx5_mode) {
        case SRIOV_LEGACY:
                *mode = DEVLINK_ESWITCH_MODE_LEGACY;
                break;
        case SRIOV_OFFLOADS:
                *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
{
        switch (mode) {
        case DEVLINK_ESWITCH_INLINE_MODE_NONE:
                *mlx5_mode = MLX5_INLINE_MODE_NONE;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_LINK:
                *mlx5_mode = MLX5_INLINE_MODE_L2;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
                *mlx5_mode = MLX5_INLINE_MODE_IP;
                break;
        case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
                *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
{
        switch (mlx5_mode) {
        case MLX5_INLINE_MODE_NONE:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
                break;
        case MLX5_INLINE_MODE_L2:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
                break;
        case MLX5_INLINE_MODE_IP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
                break;
        case MLX5_INLINE_MODE_TCP_UDP:
                *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int mlx5_devlink_eswitch_check(struct devlink *devlink)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);

        if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
                return -EOPNOTSUPP;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (dev->priv.eswitch->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        return 0;
}
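
/* devlink "eswitch mode" knob: translate the devlink mode to the mlx5
 * SRIOV mode and restart the e-switch in the requested mode.
 */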
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        u16 cur_mlx5_mode, mlx5_mode = 0;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        cur_mlx5_mode = dev->priv.eswitch->mode;

        if (esw_mode_from_devlink(mode, &mlx5_mode))
                return -EINVAL;

        if (cur_mlx5_mode == mlx5_mode)
                return 0;

        if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
                return esw_offloads_start(dev->priv.eswitch);
        else if (mode == DEVLINK_ESWITCH_MODE_LEGACY)
                return esw_offloads_stop(dev->priv.eswitch);
        else
                return -EINVAL;
}

int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
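
/* devlink "inline-mode" knob: settable only when the device requires a
 * min inline mode per vport context and no offloaded flows exist; the
 * new mode is applied to every enabled VF vport, with rollback if any
 * vport fails.
 */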
int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err, vport;
        u8 mlx5_mode;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE)
                        return 0;
                /* fall through */
        case MLX5_CAP_INLINE_MODE_L2:
                esw_warn(dev, "Inline mode can't be set\n");
                return -EOPNOTSUPP;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                break;
        }

        if (esw->offloads.num_flows > 0) {
                esw_warn(dev, "Can't set inline mode when flows are configured\n");
                return -EOPNOTSUPP;
        }

        err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
        if (err)
                goto out;

        for (vport = 1; vport < esw->enabled_vports; vport++) {
                err = mlx5_modify_nic_vport_min_inline(dev, vport, mlx5_mode);
                if (err) {
                        esw_warn(dev, "Failed to set min inline on vport %d\n",
                                 vport);
                        goto revert_inline_mode;
                }
        }

        esw->offloads.inline_mode = mlx5_mode;
        return 0;

revert_inline_mode:
        while (--vport > 0)
                mlx5_modify_nic_vport_min_inline(dev,
                                                 vport,
                                                 esw->offloads.inline_mode);
out:
        return err;
}

int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        return esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
}

int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, int nvfs, u8 *mode)
{
        u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
        struct mlx5_core_dev *dev = esw->dev;
        int vport;

        if (!MLX5_CAP_GEN(dev, vport_group_manager))
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
        case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
                mlx5_mode = MLX5_INLINE_MODE_NONE;
                goto out;
        case MLX5_CAP_INLINE_MODE_L2:
                mlx5_mode = MLX5_INLINE_MODE_L2;
                goto out;
        case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
                goto query_vports;
        }

query_vports:
        for (vport = 1; vport <= nvfs; vport++) {
                mlx5_query_nic_vport_min_inline(dev, vport, &mlx5_mode);
                if (vport > 1 && prev_mlx5_mode != mlx5_mode)
                        return -EINVAL;
                prev_mlx5_mode = mlx5_mode;
        }

out:
        *mode = mlx5_mode;
        return 0;
}
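
/* devlink "encap" knob: toggling encap support requires re-creating
 * the fast path FDB table, so it is refused while offloaded flows
 * exist and simply recorded while in legacy mode.
 */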
int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, u8 encap)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
            (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) ||
             !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap)))
                return -EOPNOTSUPP;

        if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC)
                return -EOPNOTSUPP;

        if (esw->mode == SRIOV_LEGACY) {
                esw->offloads.encap = encap;
                return 0;
        }

        if (esw->offloads.encap == encap)
                return 0;

        if (esw->offloads.num_flows > 0) {
                esw_warn(dev, "Can't set encapsulation when flows are configured\n");
                return -EOPNOTSUPP;
        }

        esw_destroy_offloads_fast_fdb_table(esw);

        esw->offloads.encap = encap;
        err = esw_create_offloads_fast_fdb_table(esw);
        if (err) {
                esw_warn(esw->dev, "Failed re-creating fast FDB table, err %d\n", err);
                esw->offloads.encap = !encap;
                (void)esw_create_offloads_fast_fdb_table(esw);
        }
        return err;
}

int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, u8 *encap)
{
        struct mlx5_core_dev *dev = devlink_priv(devlink);
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        int err;

        err = mlx5_devlink_eswitch_check(devlink);
        if (err)
                return err;

        *encap = esw->offloads.encap;
        return 0;
}
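
/* Representor drivers (e.g. the REP_ETH netdev rep) register their
 * per-vport load/unload callbacks here; they are invoked when the
 * e-switch enters or leaves offloads mode.
 */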
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
                                     int vport_index,
                                     struct mlx5_eswitch_rep_if *__rep_if,
                                     u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep_if *rep_if;

        rep_if = &offloads->vport_reps[vport_index].rep_if[rep_type];

        rep_if->load   = __rep_if->load;
        rep_if->unload = __rep_if->unload;
        rep_if->get_proto_dev = __rep_if->get_proto_dev;
        rep_if->priv = __rep_if->priv;

        rep_if->valid = true;
}
EXPORT_SYMBOL(mlx5_eswitch_register_vport_rep);

void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw,
                                       int vport_index, u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[vport_index];

        if (esw->mode == SRIOV_OFFLOADS && esw->vports[vport_index].enabled)
                rep->rep_if[rep_type].unload(rep);

        rep->rep_if[rep_type].valid = false;
}
EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_rep);

void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
{
#define UPLINK_REP_INDEX 0
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        rep = &offloads->vport_reps[UPLINK_REP_INDEX];
        return rep->rep_if[rep_type].priv;
}

void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
                                 int vport,
                                 u8 rep_type)
{
        struct mlx5_esw_offload *offloads = &esw->offloads;
        struct mlx5_eswitch_rep *rep;

        if (vport == FDB_UPLINK_VPORT)
                vport = UPLINK_REP_INDEX;

        rep = &offloads->vport_reps[vport];

        if (rep->rep_if[rep_type].valid &&
            rep->rep_if[rep_type].get_proto_dev)
                return rep->rep_if[rep_type].get_proto_dev(rep);
        return NULL;
}
EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);

void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
{
        return mlx5_eswitch_get_proto_dev(esw, UPLINK_REP_INDEX, rep_type);
}
EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);

struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
                                                int vport)
{
        return &esw->offloads.vport_reps[vport];
}
EXPORT_SYMBOL(mlx5_eswitch_vport_rep);