/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/etherdevice.h>
#include <linux/idr.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "mlx5_core.h"
#include "eswitch.h"
#include "esw/indir_table.h"
#include "esw/acl/ofld.h"
#include "rdma.h"
#include "en.h"
#include "fs_core.h"
#include "lib/devcom.h"
#include "lib/fs_chains.h"
#include "en_tc.h"
#include "en/mapping.h"
#define mlx5_esw_for_each_rep(esw, i, rep) \
	xa_for_each(&((esw)->offloads.vport_reps), i, rep)

#define mlx5_esw_for_each_sf_rep(esw, i, rep) \
	xa_for_each_marked(&((esw)->offloads.vport_reps), i, rep, MLX5_ESW_VPT_SF)

#define mlx5_esw_for_each_vf_rep(esw, index, rep)	\
	mlx5_esw_for_each_entry_marked(&((esw)->offloads.vport_reps), index, \
				       rep, (esw)->esw_funcs.num_vfs, MLX5_ESW_VPT_VF)

/* There are two match-all miss flows, one for unicast dst mac and
 * one for multicast.
 */
#define MLX5_ESW_MISS_FLOWS (2)
#define UPLINK_REP_INDEX 0

#define MLX5_ESW_VPORT_TBL_SIZE 128
#define MLX5_ESW_VPORT_TBL_NUM_GROUPS  4
static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = {
	.max_fte = MLX5_ESW_VPORT_TBL_SIZE,
	.max_num_groups = MLX5_ESW_VPORT_TBL_NUM_GROUPS,
	.flags = 0,
};

static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
						     u16 vport_num)
{
	return xa_load(&esw->offloads.vport_reps, vport_num);
}
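
/* Hint the firmware about the origin of the packets this rule matches
 * (uplink, local vport or an internal port), so steering can apply
 * source-dependent behavior. A no-op when the flow_source capability
 * is absent.
 */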
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_esw_flow_attr *attr)
{
	if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) || !attr || !attr->in_rep)
		return;

	if (attr->int_port) {
		spec->flow_context.flow_source = mlx5e_tc_int_port_get_flow_source(attr->int_port);
		return;
	}

	spec->flow_context.flow_source = (attr->in_rep->vport == MLX5_VPORT_UPLINK) ?
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK :
					 MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
}
/* Actually only the upper 16 bits of reg c0 need to be cleared, but the lower
 * 16 bits are not used in the subsequent processing either, so clear them all
 * for simplicity.
 */
void
mlx5_eswitch_clear_rule_source_port(struct mlx5_eswitch *esw, struct mlx5_flow_spec *spec)
{
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		void *misc2;

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, 0);

		if (!memchr_inv(misc2, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc2)))
			spec->match_criteria_enable &= ~MLX5_MATCH_MISC_PARAMETERS_2;
	}
}
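
/* Match on the rule's source port, either via the metadata in reg_c_0
 * (required e.g. in dual-port RoCE mode, where a vport is not backed by
 * a single VHCA) or via the source_port/eswitch_owner_vhca_id fields of
 * the misc parameters.
 */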
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_attr *attr,
				  struct mlx5_eswitch *src_esw,
				  u16 vport)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	u32 metadata;
	void *misc2;
	void *misc;

	/* Use metadata matching because vport is not represented by single
	 * VHCA in dual-port RoCE mode, and matching on source vport may fail.
	 */
	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		if (mlx5_esw_indir_table_decap_vport(attr))
			vport = mlx5_esw_indir_table_decap_vport(attr);

		if (esw_attr->int_port)
			metadata =
				mlx5e_tc_int_port_get_metadata_for_match(esw_attr->int_port);
		else
			metadata =
				mlx5_eswitch_get_vport_metadata_for_match(src_esw, vport);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0, metadata);

		misc2 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc2, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id,
				 MLX5_CAP_GEN(src_esw->dev, vhca_id));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
			MLX5_SET_TO_ONES(fte_match_set_misc, misc,
					 source_eswitch_owner_vhca_id);

		spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
	}
}
static int
esw_setup_decap_indir(struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_table *ft;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	ft = mlx5_esw_indir_table_get(esw, attr, spec,
				      mlx5_esw_indir_table_decap_vport(attr), true);
	return PTR_ERR_OR_ZERO(ft);
}

static void
esw_cleanup_decap_indir(struct mlx5_eswitch *esw,
			struct mlx5_flow_attr *attr)
{
	if (mlx5_esw_indir_table_decap_vport(attr))
		mlx5_esw_indir_table_put(esw, attr,
					 mlx5_esw_indir_table_decap_vport(attr),
					 true);
}
static int
esw_setup_sampler_dest(struct mlx5_flow_destination *dest,
		       struct mlx5_flow_act *flow_act,
		       struct mlx5_flow_attr *attr,
		       int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER;
	dest[i].sampler_id = attr->sample_attr->sampler_id;
	return 0;
}

static int
esw_setup_ft_dest(struct mlx5_flow_destination *dest,
		  struct mlx5_flow_act *flow_act,
		  struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr,
		  struct mlx5_flow_spec *spec,
		  int i)
{
	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = attr->dest_ft;

	if (mlx5_esw_indir_table_decap_vport(attr))
		return esw_setup_decap_indir(esw, attr, spec);
	return 0;
}

static void
esw_setup_slow_path_dest(struct mlx5_flow_destination *dest,
			 struct mlx5_flow_act *flow_act,
			 struct mlx5_fs_chains *chains,
			 int i)
{
	if (mlx5_chains_ignore_flow_level_supported(chains))
		flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = mlx5_chains_get_tc_end_ft(chains);
}

static int
esw_setup_chain_dest(struct mlx5_flow_destination *dest,
		     struct mlx5_flow_act *flow_act,
		     struct mlx5_fs_chains *chains,
		     u32 chain, u32 prio, u32 level,
		     int i)
{
	struct mlx5_flow_table *ft;

	flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	ft = mlx5_chains_get_table(chains, chain, prio, level);
	if (IS_ERR(ft))
		return PTR_ERR(ft);

	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = ft;
	return 0;
}
static void esw_put_dest_tables_loop(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr,
				     int from, int to)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int i;

	for (i = from; i < to; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			mlx5_chains_put_table(chains, 0, 1, 0);
		else if (mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						     esw_attr->dests[i].mdev))
			mlx5_esw_indir_table_put(esw, attr, esw_attr->dests[i].rep->vport,
						 false);
}

static bool
esw_is_chain_src_port_rewrite(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr)
{
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			return true;
	return false;
}
static int
esw_setup_chain_src_port_rewrite(struct mlx5_flow_destination *dest,
				 struct mlx5_flow_act *flow_act,
				 struct mlx5_eswitch *esw,
				 struct mlx5_fs_chains *chains,
				 struct mlx5_flow_attr *attr,
				 int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain, 1, 0, *i);
		if (err)
			goto err_setup_chain;

		if (esw_attr->dests[j].pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[j].pkt_reformat;
		}
	}
	return 0;

err_setup_chain:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_chain_src_port_rewrite(struct mlx5_eswitch *esw,
					       struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
}
static bool
esw_is_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int i;

	for (i = esw_attr->split_count; i < esw_attr->out_count; i++)
		if (esw_attr->dests[i].rep &&
		    mlx5_esw_indir_table_needed(esw, attr, esw_attr->dests[i].rep->vport,
						esw_attr->dests[i].mdev))
			return true;
	return false;
}

static int
esw_setup_indir_table(struct mlx5_flow_destination *dest,
		      struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw,
		      struct mlx5_flow_attr *attr,
		      struct mlx5_flow_spec *spec,
		      bool ignore_flow_lvl,
		      int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	int j, err;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_SRC_REWRITE))
		return -EOPNOTSUPP;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, (*i)++) {
		if (ignore_flow_lvl)
			flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
		dest[*i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;

		dest[*i].ft = mlx5_esw_indir_table_get(esw, attr, spec,
						       esw_attr->dests[j].rep->vport, false);
		if (IS_ERR(dest[*i].ft)) {
			err = PTR_ERR(dest[*i].ft);
			goto err_indir_tbl_get;
		}
	}

	if (mlx5_esw_indir_table_decap_vport(attr)) {
		err = esw_setup_decap_indir(esw, attr, spec);
		if (err)
			goto err_indir_tbl_get;
	}

	return 0;

err_indir_tbl_get:
	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, j);
	return err;
}

static void esw_cleanup_indir_table(struct mlx5_eswitch *esw, struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;

	esw_put_dest_tables_loop(esw, attr, esw_attr->split_count, esw_attr->out_count);
	esw_cleanup_decap_indir(esw, attr);
}

static void
esw_cleanup_chain_dest(struct mlx5_fs_chains *chains, u32 chain, u32 prio, u32 level)
{
	mlx5_chains_put_table(chains, chain, prio, level);
}
static void
esw_setup_vport_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		     struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		     int attr_idx, int dest_idx, bool pkt_reformat)
{
	dest[dest_idx].type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest[dest_idx].vport.num = esw_attr->dests[attr_idx].rep->vport;
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		dest[dest_idx].vport.vhca_id =
			MLX5_CAP_GEN(esw_attr->dests[attr_idx].mdev, vhca_id);
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	}
	if (esw_attr->dests[attr_idx].flags & MLX5_ESW_DEST_ENCAP) {
		if (pkt_reformat) {
			flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
			flow_act->pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
		}
		dest[dest_idx].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID;
		dest[dest_idx].vport.pkt_reformat = esw_attr->dests[attr_idx].pkt_reformat;
	}
}

static int
esw_setup_vport_dests(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act,
		      struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *esw_attr,
		      int i)
{
	int j;

	for (j = esw_attr->split_count; j < esw_attr->out_count; j++, i++)
		esw_setup_vport_dest(dest, flow_act, esw, esw_attr, j, i, true);
	return i;
}

static bool
esw_src_port_rewrite_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_GEN(esw->dev, reg_c_preserve) &&
	       mlx5_eswitch_vport_match_metadata_enabled(esw) &&
	       MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level);
}
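
/* Fill the dest[] array for a rule according to the attribute type:
 * sampler, direct flow table, slow path, goto chain, indirect table,
 * chain with source port rewrite, or plain vport destination(s).
 * On success *i holds the number of destinations that were set up.
 */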
static int
esw_setup_dests(struct mlx5_flow_destination *dest,
		struct mlx5_flow_act *flow_act,
		struct mlx5_eswitch *esw,
		struct mlx5_flow_attr *attr,
		struct mlx5_flow_spec *spec,
		int *i)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	int err = 0;

	if (!mlx5_eswitch_termtbl_required(esw, attr, flow_act, spec) &&
	    esw_src_port_rewrite_supported(esw))
		attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) {
		esw_setup_sampler_dest(dest, flow_act, attr, *i);
		(*i)++;
	} else if (attr->dest_ft) {
		esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i);
		(*i)++;
	} else if (mlx5_esw_attr_flags_skip(attr->flags)) {
		esw_setup_slow_path_dest(dest, flow_act, chains, *i);
		(*i)++;
	} else if (attr->dest_chain) {
		err = esw_setup_chain_dest(dest, flow_act, chains, attr->dest_chain,
					   1, 0, *i);
		(*i)++;
	} else if (esw_is_indir_table(esw, attr)) {
		err = esw_setup_indir_table(dest, flow_act, esw, attr, spec, true, i);
	} else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) {
		err = esw_setup_chain_src_port_rewrite(dest, flow_act, esw, chains, attr, i);
	} else {
		*i = esw_setup_vport_dests(dest, flow_act, esw, esw_attr, *i);
	}

	return err;
}
static void
esw_cleanup_dests(struct mlx5_eswitch *esw,
		  struct mlx5_flow_attr *attr)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);

	if (attr->dest_ft) {
		esw_cleanup_decap_indir(esw, attr);
	} else if (!mlx5_esw_attr_flags_skip(attr->flags)) {
		if (attr->dest_chain)
			esw_cleanup_chain_dest(chains, attr->dest_chain, 1, 0);
		else if (esw_is_indir_table(esw, attr))
			esw_cleanup_indir_table(esw, attr);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			esw_cleanup_chain_src_port_rewrite(esw, attr);
	}
}
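
/* Offload a rule into the FDB. When the attribute carries a split
 * (mirror) count, the rule goes into the per-vport table; otherwise it
 * goes into the chain/prio table or the table given in the attribute.
 * A termination table is interposed when required.
 */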
struct mlx5_flow_handle *
mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_spec *spec,
				struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = !!(esw_attr->split_count);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *fdb;
	int i = 0;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return ERR_PTR(-EOPNOTSUPP);

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	flow_act.action = attr->action;
	/* if per flow vlan pop/push is emulated, don't set that into the firmware */
	if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		flow_act.action &= ~(MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH |
				     MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	else if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH) {
		flow_act.vlan[0].ethtype = ntohs(esw_attr->vlan_proto[0]);
		flow_act.vlan[0].vid = esw_attr->vlan_vid[0];
		flow_act.vlan[0].prio = esw_attr->vlan_prio[0];
		if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2) {
			flow_act.vlan[1].ethtype = ntohs(esw_attr->vlan_proto[1]);
			flow_act.vlan[1].vid = esw_attr->vlan_vid[1];
			flow_act.vlan[1].prio = esw_attr->vlan_prio[1];
		}
	}

	mlx5_eswitch_set_rule_flow_source(esw, spec, esw_attr);

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		int err;

		err = esw_setup_dests(dest, &flow_act, esw, attr, spec, &i);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_goto_table;
		}
	}

	if (esw_attr->decap_pkt_reformat)
		flow_act.pkt_reformat = esw_attr->decap_pkt_reformat;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[i].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[i].counter_id = mlx5_fc_id(attr->counter);
		i++;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;
	if (attr->inner_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_INNER_HEADERS;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	if (split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;

		fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	} else {
		if (attr->chain || attr->prio)
			fdb = mlx5_chains_get_table(chains, attr->chain,
						    attr->prio, 0);
		else
			fdb = attr->ft;

		if (!(attr->flags & MLX5_ESW_ATTR_FLAG_NO_IN_PORT))
			mlx5_eswitch_set_rule_source_port(esw, spec, attr,
							  esw_attr->in_mdev->priv.eswitch,
							  esw_attr->in_rep->vport);
	}
	if (IS_ERR(fdb)) {
		rule = ERR_CAST(fdb);
		goto err_esw_get;
	}

	if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
		rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, esw_attr,
						     &flow_act, dest, i);
	else
		rule = mlx5_add_flow_rules(fdb, spec, &flow_act, dest, i);
	if (IS_ERR(rule))
		goto err_add_rule;
	else
		atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;

err_add_rule:
	if (split)
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
	else if (attr->chain || attr->prio)
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_esw_get:
	esw_cleanup_dests(esw, attr);
err_create_goto_table:
	kfree(dest);
	return rule;
}
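
/* Add the fast-path half of a split (mirror) rule: forward to the
 * pre-mirror destinations and then jump to the per-vport fwd table
 * where the post-mirror half of the rule is installed.
 */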
struct mlx5_flow_handle *
mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_spec *spec,
			  struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	struct mlx5_vport_tbl_attr fwd_attr;
	struct mlx5_flow_destination *dest;
	struct mlx5_flow_table *fast_fdb;
	struct mlx5_flow_table *fwd_fdb;
	struct mlx5_flow_handle *rule;
	int i, err = 0;

	dest = kcalloc(MLX5_MAX_FLOW_FWD_VPORTS + 1, sizeof(*dest), GFP_KERNEL);
	if (!dest)
		return ERR_PTR(-ENOMEM);

	fast_fdb = mlx5_chains_get_table(chains, attr->chain, attr->prio, 0);
	if (IS_ERR(fast_fdb)) {
		rule = ERR_CAST(fast_fdb);
		goto err_get_fast;
	}

	fwd_attr.chain = attr->chain;
	fwd_attr.prio = attr->prio;
	fwd_attr.vport = esw_attr->in_rep->vport;
	fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	fwd_fdb = mlx5_esw_vporttbl_get(esw, &fwd_attr);
	if (IS_ERR(fwd_fdb)) {
		rule = ERR_CAST(fwd_fdb);
		goto err_get_fwd;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	for (i = 0; i < esw_attr->split_count; i++) {
		if (esw_is_indir_table(esw, attr))
			err = esw_setup_indir_table(dest, &flow_act, esw, attr, spec, false, &i);
		else if (esw_is_chain_src_port_rewrite(esw, esw_attr))
			err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr,
							       &i);
		else
			esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false);

		if (err) {
			rule = ERR_PTR(err);
			goto err_chain_src_rewrite;
		}
	}
	dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest[i].ft = fwd_fdb;
	i++;

	mlx5_eswitch_set_rule_source_port(esw, spec, attr,
					  esw_attr->in_mdev->priv.eswitch,
					  esw_attr->in_rep->vport);

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;
	rule = mlx5_add_flow_rules(fast_fdb, spec, &flow_act, dest, i);

	if (IS_ERR(rule)) {
		i = esw_attr->split_count;
		goto err_chain_src_rewrite;
	}

	atomic64_inc(&esw->offloads.num_flows);

	kfree(dest);
	return rule;
err_chain_src_rewrite:
	esw_put_dest_tables_loop(esw, attr, 0, i);
	mlx5_esw_vporttbl_put(esw, &fwd_attr);
err_get_fwd:
	mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
err_get_fast:
	kfree(dest);
	return rule;
}
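
/* Common teardown for offloaded and fwd rules: delete the rule, release
 * any termination tables and put the tables taken at rule creation.
 */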
static void
__mlx5_eswitch_del_rule(struct mlx5_eswitch *esw,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr,
			bool fwd_rule)
{
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_fs_chains *chains = esw_chains(esw);
	bool split = (esw_attr->split_count > 0);
	struct mlx5_vport_tbl_attr fwd_attr;
	int i;

	mlx5_del_flow_rules(rule);

	if (!mlx5_esw_attr_flags_skip(attr->flags)) {
		/* unref the term table */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (esw_attr->dests[i].termtbl)
				mlx5_eswitch_termtbl_put(esw, esw_attr->dests[i].termtbl);
		}
	}

	atomic64_dec(&esw->offloads.num_flows);

	if (fwd_rule || split) {
		fwd_attr.chain = attr->chain;
		fwd_attr.prio = attr->prio;
		fwd_attr.vport = esw_attr->in_rep->vport;
		fwd_attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
	}

	if (fwd_rule) {
		mlx5_esw_vporttbl_put(esw, &fwd_attr);
		mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_put_dest_tables_loop(esw, attr, 0, esw_attr->split_count);
	} else {
		if (split)
			mlx5_esw_vporttbl_put(esw, &fwd_attr);
		else if (attr->chain || attr->prio)
			mlx5_chains_put_table(chains, attr->chain, attr->prio, 0);
		esw_cleanup_dests(esw, attr);
	}
}

void
mlx5_eswitch_del_offloaded_rule(struct mlx5_eswitch *esw,
				struct mlx5_flow_handle *rule,
				struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, false);
}

void
mlx5_eswitch_del_fwd_rule(struct mlx5_eswitch *esw,
			  struct mlx5_flow_handle *rule,
			  struct mlx5_flow_attr *attr)
{
	__mlx5_eswitch_del_rule(esw, rule, attr, true);
}
static int esw_set_global_vlan_pop(struct mlx5_eswitch *esw, u8 val)
{
	struct mlx5_eswitch_rep *rep;
	unsigned long i;
	int err = 0;

	esw_debug(esw->dev, "%s applying global %s policy\n", __func__, val ? "pop" : "none");
	mlx5_esw_for_each_host_func_vport(esw, i, rep, esw->esw_funcs.num_vfs) {
		if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
			continue;

		err = __mlx5_eswitch_set_vport_vlan(esw, rep->vport, 0, 0, val);
		if (err)
			goto out;
	}

out:
	return err;
}

static struct mlx5_eswitch_rep *
esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push)
		vport = in_rep;
	else if (pop)
		vport = out_rep;
	else
		vport = in_rep;

	return vport;
}
static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr,
				     bool push, bool pop, bool fwd)
{
	struct mlx5_eswitch_rep *in_rep, *out_rep;

	if ((push || pop) && !fwd)
		goto out_notsupp;

	in_rep = attr->in_rep;
	out_rep = attr->dests[0].rep;

	if (push && in_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	if (pop && out_rep->vport == MLX5_VPORT_UPLINK)
		goto out_notsupp;

	/* vport has vlan push configured, can't offload VF --> wire rules w.o it */
	if (!push && !pop && fwd)
		if (in_rep->vlan && out_rep->vport == MLX5_VPORT_UPLINK)
			goto out_notsupp;

	/* protects against (1) setting rules with different vlans to push and
	 * (2) setting rules w.o vlans (attr->vlan = 0) && w. vlans to push (!= 0)
	 */
	if (push && in_rep->vlan_refcount && (in_rep->vlan != attr->vlan_vid[0]))
		goto out_notsupp;

	return 0;

out_notsupp:
	return -EOPNOTSUPP;
}
int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!((attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) &&
		   !attr->dest_chain);

	mutex_lock(&esw->state_lock);

	err = esw_add_vlan_action_check(esw_attr, push, pop, fwd);
	if (err)
		goto unlock;

	attr->flags &= ~MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK) {
			vport->vlan_refcount++;
			attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
		}

		goto unlock;
	}

	if (!push && !pop)
		goto unlock;

	if (!(offloads->vlan_push_pop_refcount)) {
		/* it's the 1st vlan rule, apply global vlan pop policy */
		err = esw_set_global_vlan_pop(esw, SET_VLAN_STRIP);
		if (err)
			goto out;
	}
	offloads->vlan_push_pop_refcount++;

	if (push) {
		if (vport->vlan_refcount)
			goto skip_set_push;

		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport, esw_attr->vlan_vid[0],
						    0, SET_VLAN_INSERT | SET_VLAN_STRIP);
		if (err)
			goto out;
		vport->vlan = esw_attr->vlan_vid[0];
skip_set_push:
		vport->vlan_refcount++;
	}
out:
	if (!err)
		attr->flags |= MLX5_ESW_ATTR_FLAG_VLAN_HANDLED;
unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}
int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw,
				 struct mlx5_flow_attr *attr)
{
	struct offloads_fdb *offloads = &esw->fdb_table.offloads;
	struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
	struct mlx5_eswitch_rep *vport = NULL;
	bool push, pop, fwd;
	int err = 0;

	/* nop if we're on the vlan push/pop non emulation mode */
	if (mlx5_eswitch_vlan_actions_supported(esw->dev, 1))
		return 0;

	if (!(attr->flags & MLX5_ESW_ATTR_FLAG_VLAN_HANDLED))
		return 0;

	push = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH);
	pop  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP);
	fwd  = !!(attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);

	mutex_lock(&esw->state_lock);

	vport = esw_vlan_action_get_vport(esw_attr, push, pop);

	if (!push && !pop && fwd) {
		/* tracks VF --> wire rules without vlan push action */
		if (esw_attr->dests[0].rep->vport == MLX5_VPORT_UPLINK)
			vport->vlan_refcount--;

		goto out;
	}

	if (push) {
		vport->vlan_refcount--;
		if (vport->vlan_refcount)
			goto skip_unset_push;

		vport->vlan = 0;
		err = __mlx5_eswitch_set_vport_vlan(esw, vport->vport,
						    0, 0, SET_VLAN_STRIP);
		if (err)
			goto out;
	}

skip_unset_push:
	offloads->vlan_push_pop_refcount--;
	if (offloads->vlan_push_pop_refcount)
		goto out;

	/* no more vlan rules, stop global vlan pop policy */
	err = esw_set_global_vlan_pop(esw, 0);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}
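
/* Catch packets sent from the e-switch manager's SQ with the given sqn
 * (e.g. representor transmit traffic) and steer them to the vport that
 * the rep stands for, possibly on the peer e-switch.
 */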
struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
				    struct mlx5_eswitch *from_esw,
				    struct mlx5_eswitch_rep *rep,
				    u32 sqn)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn);
	/* source vport is the esw manager */
	MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(from_esw->dev, vhca_id));

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
	if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch))
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = rep->vport;
	dest.vport.vhca_id = MLX5_CAP_GEN(rep->esw->dev, vhca_id);
	dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	if (rep->vport == MLX5_VPORT_UPLINK)
		spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;

	flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule))
		esw_warn(on_esw->dev, "FDB: Failed to add send to vport rule err %ld\n",
			 PTR_ERR(flow_rule));
out:
	kvfree(spec);
	return flow_rule;
}
EXPORT_SYMBOL(mlx5_eswitch_add_send_to_vport_rule);
void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule)
{
	mlx5_del_flow_rules(rule);
}

static void mlx5_eswitch_del_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows = esw->fdb_table.offloads.send_to_vport_meta_rules;
	int i = 0, num_vfs = esw->esw_funcs.num_vfs;

	if (!num_vfs || !flows)
		return;

	for (i = 0; i < num_vfs; i++)
		mlx5_del_flow_rules(flows[i]);

	kvfree(flows);
}
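
/* One rule per VF: match the VF's source metadata in reg_c_0 plus the
 * slow-table goto-vport mark in reg_c_1 and forward to that VF's vport.
 */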
static int
mlx5_eswitch_add_send_to_vport_meta_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	int num_vfs, rule_idx = 0, err = 0;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_handle **flows;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	u16 vport_num;

	num_vfs = esw->esw_funcs.num_vfs;
	flows = kvcalloc(num_vfs, sizeof(*flows), GFP_KERNEL);
	if (!flows)
		return -ENOMEM;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto alloc_err;
	}

	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
	MLX5_SET(fte_match_param, spec->match_criteria,
		 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);
	MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_1,
		 ESW_TUN_SLOW_TABLE_GOTO_VPORT_MARK);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	mlx5_esw_for_each_vf_vport(esw, i, vport, num_vfs) {
		vport_num = vport->vport;
		MLX5_SET(fte_match_param, spec->match_value, misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num));
		dest.vport.num = vport_num;

		flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
						spec, &flow_act, &dest, 1);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			esw_warn(esw->dev, "FDB: Failed to add send to vport meta rule idx %d, err %ld\n",
				 rule_idx, PTR_ERR(flow_rule));
			goto rule_err;
		}
		flows[rule_idx++] = flow_rule;
	}

	esw->fdb_table.offloads.send_to_vport_meta_rules = flows;
	kvfree(spec);
	return 0;

rule_err:
	while (--rule_idx >= 0)
		mlx5_del_flow_rules(flows[rule_idx]);
	kvfree(spec);
alloc_err:
	kvfree(flows);
	return err;
}
static bool mlx5_eswitch_reg_c1_loopback_supported(struct mlx5_eswitch *esw)
{
	return MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
	       MLX5_FDB_TO_VPORT_REG_C_1;
}
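
/* Enable or disable copying reg_c_0 (and reg_c_1 when loopback is
 * supported) from FDB to NIC RX across the e-switch, using a
 * query/modify of the esw vport context.
 */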
static int esw_set_passing_vport_metadata(struct mlx5_eswitch *esw, bool enable)
{
	u32 out[MLX5_ST_SZ_DW(query_esw_vport_context_out)] = {};
	u32 min[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};
	u32 in[MLX5_ST_SZ_DW(query_esw_vport_context_in)] = {};
	u8 curr, wanted;
	int err;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw) &&
	    !mlx5_eswitch_vport_match_metadata_enabled(esw))
		return 0;

	MLX5_SET(query_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT);
	err = mlx5_cmd_exec_inout(esw->dev, query_esw_vport_context, in, out);
	if (err)
		return err;

	curr = MLX5_GET(query_esw_vport_context_out, out,
			esw_vport_context.fdb_to_vport_reg_c_id);
	wanted = MLX5_FDB_TO_VPORT_REG_C_0;
	if (mlx5_eswitch_reg_c1_loopback_supported(esw))
		wanted |= MLX5_FDB_TO_VPORT_REG_C_1;

	if (enable)
		curr |= wanted;
	else
		curr &= ~wanted;

	MLX5_SET(modify_esw_vport_context_in, min,
		 esw_vport_context.fdb_to_vport_reg_c_id, curr);
	MLX5_SET(modify_esw_vport_context_in, min,
		 field_select.fdb_to_vport_reg_c_id, 1);

	err = mlx5_eswitch_modify_esw_vport_context(esw->dev, 0, false, min);
	if (!err) {
		if (enable && (curr & MLX5_FDB_TO_VPORT_REG_C_1))
			esw->flags |= MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
		else
			esw->flags &= ~MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED;
	}

	return err;
}
static void peer_miss_rules_setup(struct mlx5_eswitch *esw,
				  struct mlx5_core_dev *peer_dev,
				  struct mlx5_flow_spec *spec,
				  struct mlx5_flow_destination *dest)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

		MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
			 MLX5_CAP_GEN(peer_dev, vhca_id));

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc,
				 source_eswitch_owner_vhca_id);
	}

	dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest->vport.num = peer_dev->priv.eswitch->manager_vport;
	dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id);
	dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
}
static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw,
					       struct mlx5_eswitch *peer_esw,
					       struct mlx5_flow_spec *spec,
					       u16 vport)
{
	void *misc;

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(peer_esw,
								   vport));
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);
	}
}
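
/* Install one miss rule per function (PF, ECPF, VFs) that forwards
 * traffic sourced from the peer e-switch to the peer's manager vport.
 */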
static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw,
				       struct mlx5_core_dev *peer_dev)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle **flows;
	/* total vports is the same for both e-switches */
	int nvports = esw->total_vports;
	struct mlx5_flow_handle *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_vport *vport;
	unsigned long i;
	void *misc;
	int err;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	peer_miss_rules_setup(esw, peer_dev, spec, &dest);

	flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL);
	if (!flows) {
		err = -ENOMEM;
		goto alloc_flows_err;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters);

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch,
						   spec, MLX5_VPORT_PF);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_pf_flow_err;
		}
		flows[vport->index] = flow;
	}

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF);
		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_ecpf_flow_err;
		}
		flows[vport->index] = flow;
	}

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		esw_set_peer_miss_rule_source_port(esw,
						   peer_dev->priv.eswitch,
						   spec, vport->vport);

		flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					   spec, &flow_act, &dest, 1);
		if (IS_ERR(flow)) {
			err = PTR_ERR(flow);
			goto add_vf_flow_err;
		}
		flows[vport->index] = flow;
	}

	esw->fdb_table.offloads.peer_miss_rules = flows;

	kvfree(spec);
	return 0;

add_vf_flow_err:
	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) {
		if (!flows[vport->index])
			continue;
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_ecpf_flow_err:
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
add_pf_flow_err:
	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err);
	kvfree(flows);
alloc_flows_err:
	kvfree(spec);
	return err;
}
static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_handle **flows;
	struct mlx5_vport *vport;
	unsigned long i;

	flows = esw->fdb_table.offloads.peer_miss_rules;

	mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev))
		mlx5_del_flow_rules(flows[vport->index]);

	if (mlx5_ecpf_vport_exists(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF);
		mlx5_del_flow_rules(flows[vport->index]);
	}

	if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF);
		mlx5_del_flow_rules(flows[vport->index]);
	}
	kvfree(flows);
}
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_spec *spec;
	void *headers_c;
	void *headers_v;
	int err = 0;
	u8 *dmac_c;
	u8 *dmac_v;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		err = -ENOMEM;
		goto out;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				 outer_headers);
	dmac_c = MLX5_ADDR_OF(fte_match_param, headers_c,
			      outer_headers.dmac_47_16);
	dmac_c[0] = 0x01;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = esw->manager_vport;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add unicast miss flow rule err %d\n", err);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_uni = flow_rule;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				 outer_headers);
	dmac_v = MLX5_ADDR_OF(fte_match_param, headers_v,
			      outer_headers.dmac_47_16);
	dmac_v[0] = 0x01;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb,
					spec, &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		esw_warn(esw->dev, "FDB: Failed to add multicast miss flow rule err %d\n", err);
		mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
		goto out;
	}

	esw->fdb_table.offloads.miss_rule_multi = flow_rule;
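
out:
	kvfree(spec);
	return err;
}

/* Add a rule to the restore table that matches the user-data bits of
 * reg_c_0 against the given tag, copies reg_c_1 to reg_b and forwards
 * to the offloads table with the tag set as flow tag.
 */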
struct mlx5_flow_handle *
esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag)
{
	struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, };
	struct mlx5_flow_table *ft = esw->offloads.ft_offloads_restore;
	struct mlx5_flow_context *flow_context;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	void *misc;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return ERR_PTR(-EOPNOTSUPP);

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return ERR_PTR(-ENOMEM);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
		 ESW_REG_C0_USER_DATA_METADATA_MASK);
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    misc_parameters_2);
	MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0, tag);
	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
			  MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	flow_act.modify_hdr = esw->offloads.restore_copy_hdr_id;

	flow_context = &spec->flow_context;
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = tag;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->offloads.ft_offloads;

	flow_rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	kvfree(spec);

	if (IS_ERR(flow_rule))
		esw_warn(esw->dev,
			 "Failed to create restore rule for tag: %d, err(%d)\n",
			 tag, (int)PTR_ERR(flow_rule));

	return flow_rule;
}
#define MAX_PF_SQ 256
#define MAX_SQ_NVPORTS 32

static void esw_set_flow_group_source_port(struct mlx5_eswitch *esw,
					   u32 *flow_group_in)
{
	void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
					    flow_group_in,
					    match_criteria);

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
	} else {
		MLX5_SET(create_flow_group_in, flow_group_in,
			 match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS);

		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_port);
	}
}
#if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
static void esw_vport_tbl_put(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		mlx5_esw_vporttbl_put(esw, &attr);
	}
}

static int esw_vport_tbl_get(struct mlx5_eswitch *esw)
{
	struct mlx5_vport_tbl_attr attr;
	struct mlx5_flow_table *fdb;
	struct mlx5_vport *vport;
	unsigned long i;

	attr.chain = 0;
	attr.prio = 1;
	mlx5_esw_for_each_vport(esw, i, vport) {
		attr.vport = vport->vport;
		attr.vport_ns = &mlx5_esw_vport_tbl_mirror_ns;
		fdb = mlx5_esw_vporttbl_get(esw, &attr);
		if (IS_ERR(fdb))
			goto out;
	}
	return 0;

out:
	esw_vport_tbl_put(esw);
	return PTR_ERR(fdb);
}
#define fdb_modify_header_fwd_to_table_supported(esw) \
	(MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table))
static void esw_init_chains_offload_flags(struct mlx5_eswitch *esw, u32 *flags)
{
	struct mlx5_core_dev *dev = esw->dev;

	if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ignore_flow_level))
		*flags |= MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;

	if (!MLX5_CAP_ESW_FLOWTABLE(dev, multi_fdb_encap) &&
	    esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported, update firmware if needed\n");
	} else if (!mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_warn(dev, "Tc chains and priorities offload aren't supported\n");
	} else if (!fdb_modify_header_fwd_to_table_supported(esw)) {
		/* Disabled when ttl workaround is needed, e.g
		 * when ESWITCH_IPV4_TTL_MODIFY_ENABLE = true in mlxconfig
		 */
		esw_warn(dev,
			 "Tc chains and priorities offload aren't supported, check firmware version, or mlxconfig settings\n");
		*flags &= ~MLX5_CHAINS_AND_PRIOS_SUPPORTED;
	} else {
		*flags |= MLX5_CHAINS_AND_PRIOS_SUPPORTED;
		esw_info(dev, "Supported tc chains and prios offload\n");
	}

	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		*flags |= MLX5_CHAINS_FT_TUNNEL_SUPPORTED;
}
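
/* Create the tc chains infrastructure on top of the FDB: the tc end ft,
 * the chain 0 fast path root and, when priorities are not supported,
 * the per-vport split tables.
 */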
static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *nf_ft, *ft;
	struct mlx5_chains_attr attr = {};
	struct mlx5_fs_chains *chains;
	u32 fdb_max;
	int err;

	fdb_max = 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size);

	esw_init_chains_offload_flags(esw, &attr.flags);
	attr.ns = MLX5_FLOW_NAMESPACE_FDB;
	attr.max_ft_sz = fdb_max;
	attr.max_grp_num = esw->params.large_group_num;
	attr.default_ft = miss_fdb;
	attr.mapping = esw->offloads.reg_c0_obj_pool;

	chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(chains)) {
		err = PTR_ERR(chains);
		esw_warn(dev, "Failed to create fdb chains err(%d)\n", err);
		return err;
	}

	esw->fdb_table.offloads.esw_chains_priv = chains;

	/* Create tc_end_ft which is the always created ft chain */
	nf_ft = mlx5_chains_get_table(chains, mlx5_chains_get_nf_ft_chain(chains),
				      1, 0);
	if (IS_ERR(nf_ft)) {
		err = PTR_ERR(nf_ft);
		goto nf_ft_err;
	}

	/* Always open the root for fast path */
	ft = mlx5_chains_get_table(chains, 0, 1, 0);
	if (IS_ERR(ft)) {
		err = PTR_ERR(ft);
		goto level_0_err;
	}

	/* Open level 1 for split fdb rules now if prios isn't supported */
	if (!mlx5_chains_prios_supported(chains)) {
		err = esw_vport_tbl_get(esw);
		if (err)
			goto level_1_err;
	}

	mlx5_chains_set_end_ft(chains, nf_ft);

	return 0;

level_1_err:
	mlx5_chains_put_table(chains, 0, 1, 0);
level_0_err:
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
nf_ft_err:
	mlx5_chains_destroy(chains);
	esw->fdb_table.offloads.esw_chains_priv = NULL;

	return err;
}

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{
	if (!mlx5_chains_prios_supported(chains))
		esw_vport_tbl_put(esw);
	mlx5_chains_put_table(chains, 0, 1, 0);
	mlx5_chains_put_table(chains, mlx5_chains_get_nf_ft_chain(chains), 1, 0);
	mlx5_chains_destroy(chains);
}

#else /* CONFIG_MLX5_CLS_ACT */

static int
esw_chains_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *miss_fdb)
{ return 0; }

static void
esw_chains_destroy(struct mlx5_eswitch *esw, struct mlx5_fs_chains *chains)
{}

#endif
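
/* Create the slow path FDB and its flow groups, laid out in this order:
 * send-to-vport, optional send-to-vport metadata, optional peer miss,
 * and the unicast/multicast miss group; then install the miss rules.
 */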
static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	int num_vfs, table_size, ix, err = 0;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb = NULL;
	u32 flags = 0, *flow_group_in;
	struct mlx5_flow_group *g;
	void *match_criteria;
	u8 *dmac;

	esw_debug(esw->dev, "Create offloads FDB Tables\n");

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	root_ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_FDB);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		err = -EOPNOTSUPP;
		goto ns_err;
	}
	esw->fdb_table.offloads.ns = root_ns;
	err = mlx5_flow_namespace_set_mode(root_ns,
					   esw->dev->priv.steering->mode);
	if (err) {
		esw_warn(dev, "Failed to set FDB namespace steering mode\n");
		goto ns_err;
	}

	/* To be strictly correct:
	 *	MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ)
	 * should be:
	 *	esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ +
	 *	peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ
	 * but as the peer device might not be in switchdev mode it's not
	 * possible. We use the fact that by default FW sets max vfs and max sfs
	 * to the same value on both devices. If it needs to be changed in the future note
	 * the peer miss group should also be created based on the number of
	 * total vports of the peer (currently it also uses esw->total_vports).
	 */
	table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) +
		     MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs;

	/* create the slow path fdb with encap set, so further table instances
	 * can be created at run time while VFs are probed if the FW allows that.
	 */
	if (esw->offloads.encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE)
		flags |= (MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
			  MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);

	ft_attr.flags = flags;
	ft_attr.max_fte = table_size;
	ft_attr.prio = FDB_SLOW_PATH;

	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create slow path FDB Table err %d\n", err);
		goto slow_fdb_err;
	}
	esw->fdb_table.offloads.slow_fdb = fdb;

	/* Create empty TC-miss managed table. This allows plugging in following
	 * priorities without directly exposing their level 0 table to
	 * eswitch_offloads and passing it as miss_fdb to following call to
	 * esw_chains_create().
	 */
	memset(&ft_attr, 0, sizeof(ft_attr));
	ft_attr.prio = FDB_TC_MISS;
	esw->fdb_table.offloads.tc_miss_table = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(esw->fdb_table.offloads.tc_miss_table)) {
		err = PTR_ERR(esw->fdb_table.offloads.tc_miss_table);
		esw_warn(dev, "Failed to create TC miss FDB Table err %d\n", err);
		goto tc_miss_table_err;
	}

	err = esw_chains_create(esw, esw->fdb_table.offloads.tc_miss_table);
	if (err) {
		esw_warn(dev, "Failed to open fdb chains err(%d)\n", err);
		goto fdb_chains_err;
	}

	/* create send-to-vport group */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);

	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_sqn);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		MLX5_SET_TO_ONES(fte_match_param, match_criteria,
				 misc_parameters.source_eswitch_owner_vhca_id);
		MLX5_SET(create_flow_group_in, flow_group_in,
			 source_eswitch_owner_vhca_id_valid, 1);
	}

	/* See comment above table_size calculation */
	ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create send-to-vport flow group err(%d)\n", err);
		goto send_vport_err;
	}
	esw->fdb_table.offloads.send_to_vport_grp = g;

	if (esw_src_port_rewrite_supported(esw)) {
		/* meta send to vport */
		memset(flow_group_in, 0, inlen);
		MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
			 MLX5_MATCH_MISC_PARAMETERS_2);

		match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);

		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());
		MLX5_SET(fte_match_param, match_criteria,
			 misc_parameters_2.metadata_reg_c_1, ESW_TUN_MASK);

		num_vfs = esw->esw_funcs.num_vfs;
		if (num_vfs) {
			MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
			MLX5_SET(create_flow_group_in, flow_group_in,
				 end_flow_index, ix + num_vfs - 1);
			ix += num_vfs;

			g = mlx5_create_flow_group(fdb, flow_group_in);
			if (IS_ERR(g)) {
				err = PTR_ERR(g);
				esw_warn(dev, "Failed to create send-to-vport meta flow group err(%d)\n",
					 err);
				goto send_vport_meta_err;
			}
			esw->fdb_table.offloads.send_to_vport_meta_grp = g;

			err = mlx5_eswitch_add_send_to_vport_meta_rules(esw);
			if (err)
				goto meta_rule_err;
		}
	}

	if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) {
		/* create peer esw miss group */
		memset(flow_group_in, 0, inlen);

		esw_set_flow_group_source_port(esw, flow_group_in);

		if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) {
			match_criteria = MLX5_ADDR_OF(create_flow_group_in,
						      flow_group_in,
						      match_criteria);

			MLX5_SET_TO_ONES(fte_match_param, match_criteria,
					 misc_parameters.source_eswitch_owner_vhca_id);

			MLX5_SET(create_flow_group_in, flow_group_in,
				 source_eswitch_owner_vhca_id_valid, 1);
		}

		MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
		MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
			 ix + esw->total_vports - 1);
		ix += esw->total_vports;

		g = mlx5_create_flow_group(fdb, flow_group_in);
		if (IS_ERR(g)) {
			err = PTR_ERR(g);
			esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err);
			goto peer_miss_err;
		}
		esw->fdb_table.offloads.peer_miss_grp = g;
	}

	/* create miss group */
	memset(flow_group_in, 0, inlen);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
				      match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
			    outer_headers.dmac_47_16);
	dmac[0] = 0x01;

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
		 ix + MLX5_ESW_MISS_FLOWS);

	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create miss flow group err(%d)\n", err);
		goto miss_err;
	}
	esw->fdb_table.offloads.miss_grp = g;

	err = esw_add_fdb_miss_rule(esw);
	if (err)
		goto miss_rule_err;

	kvfree(flow_group_in);
	return 0;

miss_rule_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);
miss_err:
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
peer_miss_err:
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
meta_rule_err:
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
send_vport_meta_err:
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
send_vport_err:
	esw_chains_destroy(esw, esw_chains(esw));
fdb_chains_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
tc_miss_table_err:
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
slow_fdb_err:
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(root_ns, MLX5_FLOW_STEERING_MODE_DMFS);
ns_err:
	kvfree(flow_group_in);
	return err;
}
static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw)
{
	if (!esw->fdb_table.offloads.slow_fdb)
		return;

	esw_debug(esw->dev, "Destroy offloads FDB Tables\n");
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi);
	mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni);
	mlx5_eswitch_del_send_to_vport_meta_rules(esw);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp);
	if (esw->fdb_table.offloads.send_to_vport_meta_grp)
		mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_meta_grp);
	if (MLX5_CAP_ESW(esw->dev, merged_eswitch))
		mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp);
	mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp);

	esw_chains_destroy(esw, esw_chains(esw));

	mlx5_destroy_flow_table(esw->fdb_table.offloads.tc_miss_table);
	mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb);
	/* Holds true only as long as DMFS is the default */
	mlx5_flow_namespace_set_mode(esw->fdb_table.offloads.ns,
				     MLX5_FLOW_STEERING_MODE_DMFS);
	atomic64_set(&esw->user_count, 0);
}
static int esw_get_offloads_ft_size(struct mlx5_eswitch *esw)
{
	int nvports;

	nvports = esw->total_vports + MLX5_ESW_MISS_FLOWS;
	if (mlx5e_tc_int_port_supported(esw))
		nvports += MLX5E_TC_MAX_INT_PORT_NUM;

	return nvports;
}

static int esw_create_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_table *ft_offloads;
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
	if (!ns) {
		esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
		return -EOPNOTSUPP;
	}

	ft_attr.max_fte = esw_get_offloads_ft_size(esw);
	ft_attr.prio = 1;

	ft_offloads = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft_offloads)) {
		err = PTR_ERR(ft_offloads);
		esw_warn(esw->dev, "Failed to create offloads table, err %d\n", err);
		return err;
	}

	esw->offloads.ft_offloads = ft_offloads;
	return 0;
}

static void esw_destroy_offloads_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	mlx5_destroy_flow_table(offloads->ft_offloads);
}
static int esw_create_vport_rx_group(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_group *g;
	u32 *flow_group_in;
	int nvports;
	int err = 0;

	nvports = esw_get_offloads_ft_size(esw);
	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	/* create vport rx group */
	esw_set_flow_group_source_port(esw, flow_group_in);

	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, nvports - 1);

	g = mlx5_create_flow_group(esw->offloads.ft_offloads, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		mlx5_core_warn(esw->dev, "Failed to create vport rx group err %d\n", err);
		goto out;
	}

	esw->offloads.vport_rx_group = g;
out:
	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_vport_rx_group(struct mlx5_eswitch *esw)
{
	mlx5_destroy_flow_group(esw->offloads.vport_rx_group);
}
struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, u16 vport,
				  struct mlx5_flow_destination *dest)
{
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	void *misc;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		flow_rule = ERR_PTR(-ENOMEM);
		goto out;
	}

	if (mlx5_eswitch_vport_match_metadata_enabled(esw)) {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_for_match(esw, vport));

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters_2);
		MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
			 mlx5_eswitch_get_vport_metadata_mask());

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
	} else {
		misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
		MLX5_SET(fte_match_set_misc, misc, source_port, vport);

		misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
		MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

		spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	}

	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->offloads.ft_offloads, spec,
					&flow_act, dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev, "fs offloads: Failed to add vport rx rule err %ld\n", PTR_ERR(flow_rule));
		goto out;
	}

out:
	kvfree(spec);
	return flow_rule;
}
static int mlx5_eswitch_inline_mode_get(struct mlx5_eswitch *esw, u8 *mode)
{
	u8 prev_mlx5_mode, mlx5_mode = MLX5_INLINE_MODE_L2;
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_vport *vport;
	unsigned long i;

	if (!MLX5_CAP_GEN(dev, vport_group_manager))
		return -EOPNOTSUPP;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
	case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
		mlx5_mode = MLX5_INLINE_MODE_NONE;
		goto out;
	case MLX5_CAP_INLINE_MODE_L2:
		mlx5_mode = MLX5_INLINE_MODE_L2;
		goto out;
	case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
		goto query_vports;
	}

query_vports:
	mlx5_query_nic_vport_min_inline(dev, esw->first_host_vport, &prev_mlx5_mode);
	mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		mlx5_query_nic_vport_min_inline(dev, vport->vport, &mlx5_mode);
		if (prev_mlx5_mode != mlx5_mode)
			return -EINVAL;
		prev_mlx5_mode = mlx5_mode;
	}

out:
	*mode = mlx5_mode;
	return 0;
}
static void esw_destroy_restore_table(struct mlx5_eswitch *esw)
{
	struct mlx5_esw_offload *offloads = &esw->offloads;

	if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
		return;

	mlx5_modify_header_dealloc(esw->dev, offloads->restore_copy_hdr_id);
	mlx5_destroy_flow_group(offloads->restore_group);
	mlx5_destroy_flow_table(offloads->ft_offloads_restore);
}
2060 static int esw_create_restore_table(struct mlx5_eswitch *esw)
2062 u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {};
2063 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2064 struct mlx5_flow_table_attr ft_attr = {};
2065 struct mlx5_core_dev *dev = esw->dev;
2066 struct mlx5_flow_namespace *ns;
2067 struct mlx5_modify_hdr *mod_hdr;
2068 void *match_criteria, *misc;
2069 struct mlx5_flow_table *ft;
2070 struct mlx5_flow_group *g;
2074 if (!mlx5_eswitch_reg_c1_loopback_supported(esw))
2077 ns = mlx5_get_flow_namespace(dev, MLX5_FLOW_NAMESPACE_OFFLOADS);
2079 esw_warn(esw->dev, "Failed to get offloads flow namespace\n");
2083 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2084 if (!flow_group_in) {
2089 ft_attr.max_fte = 1 << ESW_REG_C0_USER_DATA_METADATA_BITS;
2090 ft = mlx5_create_flow_table(ns, &ft_attr);
2093 esw_warn(esw->dev, "Failed to create restore table, err %d\n",
2098 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2100 misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
2103 MLX5_SET(fte_match_set_misc2, misc, metadata_reg_c_0,
2104 ESW_REG_C0_USER_DATA_METADATA_MASK);
2105 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2106 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index,
2107 ft_attr.max_fte - 1);
2108 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2109 MLX5_MATCH_MISC_PARAMETERS_2);
2110 g = mlx5_create_flow_group(ft, flow_group_in);
2113 esw_warn(dev, "Failed to create restore flow group, err: %d\n",
2118 MLX5_SET(copy_action_in, modact, action_type, MLX5_ACTION_TYPE_COPY);
2119 MLX5_SET(copy_action_in, modact, src_field,
2120 MLX5_ACTION_IN_FIELD_METADATA_REG_C_1);
2121 MLX5_SET(copy_action_in, modact, dst_field,
2122 MLX5_ACTION_IN_FIELD_METADATA_REG_B);
2123 mod_hdr = mlx5_modify_header_alloc(esw->dev,
2124 MLX5_FLOW_NAMESPACE_KERNEL, 1,
2126 if (IS_ERR(mod_hdr)) {
2127 err = PTR_ERR(mod_hdr);
2128 esw_warn(dev, "Failed to create restore mod header, err: %d\n",
2133 esw->offloads.ft_offloads_restore = ft;
2134 esw->offloads.restore_group = g;
2135 esw->offloads.restore_copy_hdr_id = mod_hdr;
2137 kvfree(flow_group_in);
2142 mlx5_destroy_flow_group(g);
2144 mlx5_destroy_flow_table(ft);
2146 kvfree(flow_group_in);
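/* Note on what was just built: the restore table holds a single flow
 * group matching the user-data bits of reg_c_0, plus a modify-header
 * action copying reg_c_1 into reg_b. Rules later installed on top of
 * this table can therefore match a mapping tag in reg_c_0 while making
 * the value carried in reg_c_1 available in reg_b for the receive path.
 */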
2151 static int esw_offloads_start(struct mlx5_eswitch *esw,
2152 struct netlink_ext_ack *extack)
2156 mlx5_eswitch_disable_locked(esw, false);
2157 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
2158 esw->dev->priv.sriov.num_vfs);
2160 NL_SET_ERR_MSG_MOD(extack,
2161 "Failed setting eswitch to offloads");
2162 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
2163 MLX5_ESWITCH_IGNORE_NUM_VFS);
2165 NL_SET_ERR_MSG_MOD(extack,
2166 "Failed setting eswitch back to legacy");
2169 if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) {
2170 if (mlx5_eswitch_inline_mode_get(esw,
2171 &esw->offloads.inline_mode)) {
2172 esw->offloads.inline_mode = MLX5_INLINE_MODE_L2;
2173 NL_SET_ERR_MSG_MOD(extack,
2174 "Inline mode is different between vports");
2180 static void mlx5_esw_offloads_rep_mark_set(struct mlx5_eswitch *esw,
2181 struct mlx5_eswitch_rep *rep,
2186 /* Copy the mark from vport to its rep */
2187 mark_set = xa_get_mark(&esw->vports, rep->vport, mark);
2189 xa_set_mark(&esw->offloads.vport_reps, rep->vport, mark);
2192 static int mlx5_esw_offloads_rep_init(struct mlx5_eswitch *esw, const struct mlx5_vport *vport)
2194 struct mlx5_eswitch_rep *rep;
2198 rep = kzalloc(sizeof(*rep), GFP_KERNEL);
2202 rep->vport = vport->vport;
2203 rep->vport_index = vport->index;
2204 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2205 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
2207 err = xa_insert(&esw->offloads.vport_reps, rep->vport, rep, GFP_KERNEL);
2211 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_HOST_FN);
2212 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_VF);
2213 mlx5_esw_offloads_rep_mark_set(esw, rep, MLX5_ESW_VPT_SF);
2221 static void mlx5_esw_offloads_rep_cleanup(struct mlx5_eswitch *esw,
2222 struct mlx5_eswitch_rep *rep)
2224 xa_erase(&esw->offloads.vport_reps, rep->vport);
2228 void esw_offloads_cleanup_reps(struct mlx5_eswitch *esw)
2230 struct mlx5_eswitch_rep *rep;
2233 mlx5_esw_for_each_rep(esw, i, rep)
2234 mlx5_esw_offloads_rep_cleanup(esw, rep);
2235 xa_destroy(&esw->offloads.vport_reps);
2238 int esw_offloads_init_reps(struct mlx5_eswitch *esw)
2240 struct mlx5_vport *vport;
2244 xa_init(&esw->offloads.vport_reps);
2246 mlx5_esw_for_each_vport(esw, i, vport) {
2247 err = mlx5_esw_offloads_rep_init(esw, vport);
2254 esw_offloads_cleanup_reps(esw);
2258 static void __esw_offloads_unload_rep(struct mlx5_eswitch *esw,
2259 struct mlx5_eswitch_rep *rep, u8 rep_type)
2261 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2262 REP_LOADED, REP_REGISTERED) == REP_LOADED)
2263 esw->offloads.rep_ops[rep_type]->unload(rep);
2266 static void __unload_reps_sf_vport(struct mlx5_eswitch *esw, u8 rep_type)
2268 struct mlx5_eswitch_rep *rep;
2271 mlx5_esw_for_each_sf_rep(esw, i, rep)
2272 __esw_offloads_unload_rep(esw, rep, rep_type);
2275 static void __unload_reps_all_vport(struct mlx5_eswitch *esw, u8 rep_type)
2277 struct mlx5_eswitch_rep *rep;
2280 __unload_reps_sf_vport(esw, rep_type);
2282 mlx5_esw_for_each_vf_rep(esw, i, rep)
2283 __esw_offloads_unload_rep(esw, rep, rep_type);
2285 if (mlx5_ecpf_vport_exists(esw->dev)) {
2286 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_ECPF);
2287 __esw_offloads_unload_rep(esw, rep, rep_type);
2290 if (mlx5_core_is_ecpf_esw_manager(esw->dev)) {
2291 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_PF);
2292 __esw_offloads_unload_rep(esw, rep, rep_type);
2295 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
2296 __esw_offloads_unload_rep(esw, rep, rep_type);
2299 int mlx5_esw_offloads_rep_load(struct mlx5_eswitch *esw, u16 vport_num)
2301 struct mlx5_eswitch_rep *rep;
2305 rep = mlx5_eswitch_get_rep(esw, vport_num);
2306 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++)
2307 if (atomic_cmpxchg(&rep->rep_data[rep_type].state,
2308 REP_REGISTERED, REP_LOADED) == REP_REGISTERED) {
2309 err = esw->offloads.rep_ops[rep_type]->load(esw->dev, rep);
2317 atomic_set(&rep->rep_data[rep_type].state, REP_REGISTERED);
2318 for (--rep_type; rep_type >= 0; rep_type--)
2319 __esw_offloads_unload_rep(esw, rep, rep_type);
2323 void mlx5_esw_offloads_rep_unload(struct mlx5_eswitch *esw, u16 vport_num)
2325 struct mlx5_eswitch_rep *rep;
2328 rep = mlx5_eswitch_get_rep(esw, vport_num);
2329 for (rep_type = NUM_REP_TYPES - 1; rep_type >= 0; rep_type--)
2330 __esw_offloads_unload_rep(esw, rep, rep_type);
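/* Note: unload walks rep types from the highest index down, the reverse
 * of the load order in mlx5_esw_offloads_rep_load(), so rep types
 * registered later (which may depend on earlier ones) tear down first.
 */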
2333 int esw_offloads_load_rep(struct mlx5_eswitch *esw, u16 vport_num)
2337 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2340 if (vport_num != MLX5_VPORT_UPLINK) {
2341 err = mlx5_esw_offloads_devlink_port_register(esw, vport_num);
2346 err = mlx5_esw_offloads_rep_load(esw, vport_num);
2352 if (vport_num != MLX5_VPORT_UPLINK)
2353 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2357 void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num)
2359 if (esw->mode != MLX5_ESWITCH_OFFLOADS)
2362 mlx5_esw_offloads_rep_unload(esw, vport_num);
2364 if (vport_num != MLX5_VPORT_UPLINK)
2365 mlx5_esw_offloads_devlink_port_unregister(esw, vport_num);
2368 static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master,
2369 struct mlx5_core_dev *slave)
2371 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
2372 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
2373 struct mlx5_eswitch *esw;
2374 struct mlx5_flow_root_namespace *root;
2375 struct mlx5_flow_namespace *ns;
2376 struct mlx5_vport *vport;
2379 MLX5_SET(set_flow_table_root_in, in, opcode,
2380 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
2381 MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL);
2382 MLX5_SET(set_flow_table_root_in, in, other_vport, 1);
2383 MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK);
2386 esw = master->priv.eswitch;
2387 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2388 MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1);
2389 MLX5_SET(set_flow_table_root_in, in, table_vport_number,
2392 ns = mlx5_get_flow_vport_acl_namespace(master,
2393 MLX5_FLOW_NAMESPACE_ESW_INGRESS,
2395 root = find_root(&ns->node);
2396 mutex_lock(&root->chain_lock);
2398 MLX5_SET(set_flow_table_root_in, in,
2399 table_eswitch_owner_vhca_id_valid, 1);
2400 MLX5_SET(set_flow_table_root_in, in,
2401 table_eswitch_owner_vhca_id,
2402 MLX5_CAP_GEN(master, vhca_id));
2403 MLX5_SET(set_flow_table_root_in, in, table_id,
2406 esw = slave->priv.eswitch;
2407 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2408 ns = mlx5_get_flow_vport_acl_namespace(slave,
2409 MLX5_FLOW_NAMESPACE_ESW_INGRESS,
2411 root = find_root(&ns->node);
2412 mutex_lock(&root->chain_lock);
2413 MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id);
2416 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
2417 mutex_unlock(&root->chain_lock);
2422 static int esw_set_slave_root_fdb(struct mlx5_core_dev *master,
2423 struct mlx5_core_dev *slave)
2425 u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {};
2426 u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {};
2427 struct mlx5_flow_root_namespace *root;
2428 struct mlx5_flow_namespace *ns;
2431 MLX5_SET(set_flow_table_root_in, in, opcode,
2432 MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
2433 MLX5_SET(set_flow_table_root_in, in, table_type,
2437 ns = mlx5_get_flow_namespace(master,
2438 MLX5_FLOW_NAMESPACE_FDB);
2439 root = find_root(&ns->node);
2440 mutex_lock(&root->chain_lock);
2441 MLX5_SET(set_flow_table_root_in, in,
2442 table_eswitch_owner_vhca_id_valid, 1);
2443 MLX5_SET(set_flow_table_root_in, in,
2444 table_eswitch_owner_vhca_id,
2445 MLX5_CAP_GEN(master, vhca_id));
2446 MLX5_SET(set_flow_table_root_in, in, table_id,
2449 ns = mlx5_get_flow_namespace(slave,
2450 MLX5_FLOW_NAMESPACE_FDB);
2451 root = find_root(&ns->node);
2452 mutex_lock(&root->chain_lock);
2453 MLX5_SET(set_flow_table_root_in, in, table_id,
2457 err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out));
2458 mutex_unlock(&root->chain_lock);
2463 static int __esw_set_master_egress_rule(struct mlx5_core_dev *master,
2464 struct mlx5_core_dev *slave,
2465 struct mlx5_vport *vport,
2466 struct mlx5_flow_table *acl)
2468 struct mlx5_flow_handle *flow_rule = NULL;
2469 struct mlx5_flow_destination dest = {};
2470 struct mlx5_flow_act flow_act = {};
2471 struct mlx5_flow_spec *spec;
2475 spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
2479 spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
2480 misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2482 MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);
2483 MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id,
2484 MLX5_CAP_GEN(slave, vhca_id));
2486 misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
2487 MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);
2488 MLX5_SET_TO_ONES(fte_match_set_misc, misc,
2489 source_eswitch_owner_vhca_id);
2491 flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2492 dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
2493 dest.vport.num = slave->priv.eswitch->manager_vport;
2494 dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id);
2495 dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
2497 flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act,
2499 if (IS_ERR(flow_rule))
2500 err = PTR_ERR(flow_rule);
2502 vport->egress.offloads.bounce_rule = flow_rule;
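/* Note: this is the "bounce" rule named by the bounce_rule/bounce_grp
 * fields: in the master's egress ACL it matches traffic sourced from
 * the slave's uplink (source_port == MLX5_VPORT_UPLINK together with
 * the slave's vhca_id) and forwards it to the slave's manager vport.
 */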
2508 static int esw_set_master_egress_rule(struct mlx5_core_dev *master,
2509 struct mlx5_core_dev *slave)
2511 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
2512 struct mlx5_eswitch *esw = master->priv.eswitch;
2513 struct mlx5_flow_table_attr ft_attr = {
2514 .max_fte = 1, .prio = 0, .level = 0,
2516 struct mlx5_flow_namespace *egress_ns;
2517 struct mlx5_flow_table *acl;
2518 struct mlx5_flow_group *g;
2519 struct mlx5_vport *vport;
2520 void *match_criteria;
2524 vport = mlx5_eswitch_get_vport(esw, esw->manager_vport);
2526 return PTR_ERR(vport);
2528 egress_ns = mlx5_get_flow_vport_acl_namespace(master,
2529 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
2534 if (vport->egress.acl)
2537 flow_group_in = kvzalloc(inlen, GFP_KERNEL);
2541 acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport);
2547 match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in,
2549 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2550 misc_parameters.source_port);
2551 MLX5_SET_TO_ONES(fte_match_param, match_criteria,
2552 misc_parameters.source_eswitch_owner_vhca_id);
2553 MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
2554 MLX5_MATCH_MISC_PARAMETERS);
2556 MLX5_SET(create_flow_group_in, flow_group_in,
2557 source_eswitch_owner_vhca_id_valid, 1);
2558 MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
2559 MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
2561 g = mlx5_create_flow_group(acl, flow_group_in);
2567 err = __esw_set_master_egress_rule(master, slave, vport, acl);
2571 vport->egress.acl = acl;
2572 vport->egress.offloads.bounce_grp = g;
2574 kvfree(flow_group_in);
2579 mlx5_destroy_flow_group(g);
2581 mlx5_destroy_flow_table(acl);
2583 kvfree(flow_group_in);
2587 static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev)
2589 struct mlx5_vport *vport;
2591 vport = mlx5_eswitch_get_vport(dev->priv.eswitch,
2592 dev->priv.eswitch->manager_vport);
2594 esw_acl_egress_ofld_cleanup(vport);
2597 int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw,
2598 struct mlx5_eswitch *slave_esw)
2602 err = esw_set_uplink_slave_ingress_root(master_esw->dev,
2607 err = esw_set_slave_root_fdb(master_esw->dev,
2612 err = esw_set_master_egress_rule(master_esw->dev,
2620 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2623 esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
2628 void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw,
2629 struct mlx5_eswitch *slave_esw)
2631 esw_unset_master_egress_rule(master_esw->dev);
2632 esw_set_slave_root_fdb(NULL, slave_esw->dev);
2633 esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev);
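/* Note: teardown mirrors mlx5_eswitch_offloads_config_single_fdb() in
 * reverse (egress rule, then the slave FDB root, then the uplink
 * ingress root), and calling the two setters with a NULL master points
 * the slave back at its own root tables.
 */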
2636 #define ESW_OFFLOADS_DEVCOM_PAIR (0)
2637 #define ESW_OFFLOADS_DEVCOM_UNPAIR (1)
2639 static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw)
2641 const struct mlx5_eswitch_rep_ops *ops;
2642 struct mlx5_eswitch_rep *rep;
2646 mlx5_esw_for_each_rep(esw, i, rep) {
2647 rep_type = NUM_REP_TYPES;
2648 while (rep_type--) {
2649 ops = esw->offloads.rep_ops[rep_type];
2650 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2652 ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL);
2657 static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw)
2659 #if IS_ENABLED(CONFIG_MLX5_CLS_ACT)
2660 mlx5e_tc_clean_fdb_peer_flows(esw);
2662 mlx5_esw_offloads_rep_event_unpair(esw);
2663 esw_del_fdb_peer_miss_rules(esw);
2666 static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw,
2667 struct mlx5_eswitch *peer_esw)
2669 const struct mlx5_eswitch_rep_ops *ops;
2670 struct mlx5_eswitch_rep *rep;
2675 err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev);
2679 mlx5_esw_for_each_rep(esw, i, rep) {
2680 for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) {
2681 ops = esw->offloads.rep_ops[rep_type];
2682 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
2684 err = ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_PAIR, peer_esw);
2694 mlx5_esw_offloads_unpair(esw);
2698 static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw,
2699 struct mlx5_eswitch *peer_esw,
2702 struct mlx5_flow_root_namespace *peer_ns;
2703 struct mlx5_flow_root_namespace *ns;
2706 peer_ns = peer_esw->dev->priv.steering->fdb_root_ns;
2707 ns = esw->dev->priv.steering->fdb_root_ns;
2710 err = mlx5_flow_namespace_set_peer(ns, peer_ns);
2714 err = mlx5_flow_namespace_set_peer(peer_ns, ns);
2716 mlx5_flow_namespace_set_peer(ns, NULL);
2720 mlx5_flow_namespace_set_peer(ns, NULL);
2721 mlx5_flow_namespace_set_peer(peer_ns, NULL);
2727 static int mlx5_esw_offloads_devcom_event(int event,
2731 struct mlx5_eswitch *esw = my_data;
2732 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2733 struct mlx5_eswitch *peer_esw = event_data;
2737 case ESW_OFFLOADS_DEVCOM_PAIR:
2738 if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev)
2741 if (mlx5_eswitch_vport_match_metadata_enabled(esw) !=
2742 mlx5_eswitch_vport_match_metadata_enabled(peer_esw))
2745 err = mlx5_esw_offloads_set_ns_peer(esw, peer_esw, true);
2748 err = mlx5_esw_offloads_pair(esw, peer_esw);
2752 err = mlx5_esw_offloads_pair(peer_esw, esw);
2756 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true);
2759 case ESW_OFFLOADS_DEVCOM_UNPAIR:
2760 if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS))
2763 mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false);
2764 mlx5_esw_offloads_unpair(peer_esw);
2765 mlx5_esw_offloads_unpair(esw);
2766 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2773 mlx5_esw_offloads_unpair(esw);
2775 mlx5_esw_offloads_set_ns_peer(esw, peer_esw, false);
2777 mlx5_core_err(esw->dev, "esw offloads devcom event failure, event %u err %d\n",
2782 static void esw_offloads_devcom_init(struct mlx5_eswitch *esw)
2784 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2786 INIT_LIST_HEAD(&esw->offloads.peer_flows);
2787 mutex_init(&esw->offloads.peer_mutex);
2789 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2792 mlx5_devcom_register_component(devcom,
2793 MLX5_DEVCOM_ESW_OFFLOADS,
2794 mlx5_esw_offloads_devcom_event,
2797 mlx5_devcom_send_event(devcom,
2798 MLX5_DEVCOM_ESW_OFFLOADS,
2799 ESW_OFFLOADS_DEVCOM_PAIR, esw);
2802 static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw)
2804 struct mlx5_devcom *devcom = esw->dev->priv.devcom;
2806 if (!MLX5_CAP_ESW(esw->dev, merged_eswitch))
2809 mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
2810 ESW_OFFLOADS_DEVCOM_UNPAIR, esw);
2812 mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
2815 bool mlx5_esw_vport_match_metadata_supported(const struct mlx5_eswitch *esw)
2817 if (!MLX5_CAP_ESW(esw->dev, esw_uplink_ingress_acl))
2820 if (!(MLX5_CAP_ESW_FLOWTABLE(esw->dev, fdb_to_vport_reg_c_id) &
2821 MLX5_FDB_TO_VPORT_REG_C_0))
2824 if (!MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source))
2827 if (mlx5_core_is_ecpf_esw_manager(esw->dev) ||
2828 mlx5_ecpf_vport_exists(esw->dev))
2834 u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw)
2836 u32 vport_end_ida = (1 << ESW_VPORT_BITS) - 1;
2837 /* Reserve 0xf for internal port offload */
2838 u32 max_pf_num = (1 << ESW_PFNUM_BITS) - 2;
2842 /* Only 4 bits of pf_num */
2843 pf_num = mlx5_get_dev_index(esw->dev);
2844 if (pf_num > max_pf_num)
2847 /* Metadata is 4 bits of PFNUM and 12 bits of unique id */
2848 /* Use only non-zero vport_id (1-4095) for all PFs */
2849 id = ida_alloc_range(&esw->offloads.vport_metadata_ida, 1, vport_end_ida, GFP_KERNEL);
2852 id = (pf_num << ESW_VPORT_BITS) | id;
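/* Worked example of the layout above, using the 4-bit PFNUM / 12-bit
 * vport-id split described in the comments: pf_num = 2 with an IDA id
 * of 5 yields
 *
 *     (2 << ESW_VPORT_BITS) | 5 == (2 << 12) | 5 == 0x2005
 *
 * and mlx5_esw_match_metadata_free() recovers the IDA id by masking
 * with (1 << ESW_VPORT_BITS) - 1: 0x2005 & 0xfff == 5.
 */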
2856 void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata)
2858 u32 vport_bit_mask = (1 << ESW_VPORT_BITS) - 1;
2860 /* Metadata contains only 12 bits of the actual IDA id */
2861 ida_free(&esw->offloads.vport_metadata_ida, metadata & vport_bit_mask);
2864 static int esw_offloads_vport_metadata_setup(struct mlx5_eswitch *esw,
2865 struct mlx5_vport *vport)
2867 vport->default_metadata = mlx5_esw_match_metadata_alloc(esw);
2868 vport->metadata = vport->default_metadata;
2869 return vport->metadata ? 0 : -ENOSPC;
2872 static void esw_offloads_vport_metadata_cleanup(struct mlx5_eswitch *esw,
2873 struct mlx5_vport *vport)
2875 if (!vport->default_metadata)
2878 WARN_ON(vport->metadata != vport->default_metadata);
2879 mlx5_esw_match_metadata_free(esw, vport->default_metadata);
2882 static void esw_offloads_metadata_uninit(struct mlx5_eswitch *esw)
2884 struct mlx5_vport *vport;
2887 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2890 mlx5_esw_for_each_vport(esw, i, vport)
2891 esw_offloads_vport_metadata_cleanup(esw, vport);
2894 static int esw_offloads_metadata_init(struct mlx5_eswitch *esw)
2896 struct mlx5_vport *vport;
2900 if (!mlx5_eswitch_vport_match_metadata_enabled(esw))
2903 mlx5_esw_for_each_vport(esw, i, vport) {
2904 err = esw_offloads_vport_metadata_setup(esw, vport);
2912 esw_offloads_metadata_uninit(esw);
2916 int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable)
2920 down_write(&esw->mode_lock);
2921 if (esw->mode != MLX5_ESWITCH_NONE) {
2925 if (!mlx5_esw_vport_match_metadata_supported(esw)) {
2930 esw->flags |= MLX5_ESWITCH_VPORT_MATCH_METADATA;
2932 esw->flags &= ~MLX5_ESWITCH_VPORT_MATCH_METADATA;
2934 up_write(&esw->mode_lock);
2939 esw_vport_create_offloads_acl_tables(struct mlx5_eswitch *esw,
2940 struct mlx5_vport *vport)
2944 err = esw_acl_ingress_ofld_setup(esw, vport);
2948 err = esw_acl_egress_ofld_setup(esw, vport);
2955 esw_acl_ingress_ofld_cleanup(esw, vport);
2960 esw_vport_destroy_offloads_acl_tables(struct mlx5_eswitch *esw,
2961 struct mlx5_vport *vport)
2963 esw_acl_egress_ofld_cleanup(vport);
2964 esw_acl_ingress_ofld_cleanup(esw, vport);
2967 static int esw_create_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2969 struct mlx5_vport *vport;
2971 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2973 return PTR_ERR(vport);
2975 return esw_vport_create_offloads_acl_tables(esw, vport);
2978 static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw)
2980 struct mlx5_vport *vport;
2982 vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK);
2986 esw_vport_destroy_offloads_acl_tables(esw, vport);
2989 int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw)
2991 struct mlx5_eswitch_rep *rep;
2995 if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS)
2998 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
2999 if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED)
3002 ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK);
3006 mlx5_esw_for_each_rep(esw, i, rep) {
3007 if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED)
3008 mlx5_esw_offloads_rep_load(esw, rep->vport);
3014 static int esw_offloads_steering_init(struct mlx5_eswitch *esw)
3016 struct mlx5_esw_indir_table *indir;
3019 memset(&esw->fdb_table.offloads, 0, sizeof(struct offloads_fdb));
3020 mutex_init(&esw->fdb_table.offloads.vports.lock);
3021 hash_init(esw->fdb_table.offloads.vports.table);
3022 atomic64_set(&esw->user_count, 0);
3024 indir = mlx5_esw_indir_table_init();
3025 if (IS_ERR(indir)) {
3026 err = PTR_ERR(indir);
3027 goto create_indir_err;
3029 esw->fdb_table.offloads.indir = indir;
3031 err = esw_create_uplink_offloads_acl_tables(esw);
3033 goto create_acl_err;
3035 err = esw_create_offloads_table(esw);
3037 goto create_offloads_err;
3039 err = esw_create_restore_table(esw);
3041 goto create_restore_err;
3043 err = esw_create_offloads_fdb_tables(esw);
3045 goto create_fdb_err;
3047 err = esw_create_vport_rx_group(esw);
3054 esw_destroy_offloads_fdb_tables(esw);
3056 esw_destroy_restore_table(esw);
3058 esw_destroy_offloads_table(esw);
3059 create_offloads_err:
3060 esw_destroy_uplink_offloads_acl_tables(esw);
3062 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3064 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3068 static void esw_offloads_steering_cleanup(struct mlx5_eswitch *esw)
3070 esw_destroy_vport_rx_group(esw);
3071 esw_destroy_offloads_fdb_tables(esw);
3072 esw_destroy_restore_table(esw);
3073 esw_destroy_offloads_table(esw);
3074 esw_destroy_uplink_offloads_acl_tables(esw);
3075 mlx5_esw_indir_table_destroy(esw->fdb_table.offloads.indir);
3076 mutex_destroy(&esw->fdb_table.offloads.vports.lock);
3080 esw_vfs_changed_event_handler(struct mlx5_eswitch *esw, const u32 *out)
3082 bool host_pf_disabled;
3085 new_num_vfs = MLX5_GET(query_esw_functions_out, out,
3086 host_params_context.host_num_of_vfs);
3087 host_pf_disabled = MLX5_GET(query_esw_functions_out, out,
3088 host_params_context.host_pf_disabled);
3090 if (new_num_vfs == esw->esw_funcs.num_vfs || host_pf_disabled)
3093 /* Number of VFs can only change from "0 to x" or "x to 0". */
3094 if (esw->esw_funcs.num_vfs > 0) {
3095 mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);
3099 err = mlx5_eswitch_load_vf_vports(esw, new_num_vfs,
3100 MLX5_VPORT_UC_ADDR_CHANGE);
3104 esw->esw_funcs.num_vfs = new_num_vfs;
3107 static void esw_functions_changed_event_handler(struct work_struct *work)
3109 struct mlx5_host_work *host_work;
3110 struct mlx5_eswitch *esw;
3113 host_work = container_of(work, struct mlx5_host_work, work);
3114 esw = host_work->esw;
3116 out = mlx5_esw_query_functions(esw->dev);
3120 esw_vfs_changed_event_handler(esw, out);
3126 int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type, void *data)
3128 struct mlx5_esw_functions *esw_funcs;
3129 struct mlx5_host_work *host_work;
3130 struct mlx5_eswitch *esw;
3132 host_work = kzalloc(sizeof(*host_work), GFP_ATOMIC);
3136 esw_funcs = mlx5_nb_cof(nb, struct mlx5_esw_functions, nb);
3137 esw = container_of(esw_funcs, struct mlx5_eswitch, esw_funcs);
3139 host_work->esw = esw;
3141 INIT_WORK(&host_work->work, esw_functions_changed_event_handler);
3142 queue_work(esw->work_queue, &host_work->work);
3147 static int mlx5_esw_host_number_init(struct mlx5_eswitch *esw)
3149 const u32 *query_host_out;
3151 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3154 query_host_out = mlx5_esw_query_functions(esw->dev);
3155 if (IS_ERR(query_host_out))
3156 return PTR_ERR(query_host_out);
3158 /* Mark a non-local controller with a non-zero controller number. */
3159 esw->offloads.host_number = MLX5_GET(query_esw_functions_out, query_host_out,
3160 host_params_context.host_number);
3161 kvfree(query_host_out);
3165 bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller)
3167 /* Local controller is always valid */
3168 if (controller == 0)
3171 if (!mlx5_core_is_ecpf_esw_manager(esw->dev))
3174 /* External host numbering starts at zero in the device */
3175 return (controller == esw->offloads.host_number + 1);
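/* Concrete reading of the check above: the device numbers external
 * hosts from zero, while devlink reserves controller 0 for the local
 * controller. With host_number == 0, exactly two controller values are
 * accepted here: 0 (local) and 1 (the external host).
 */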
3178 int esw_offloads_enable(struct mlx5_eswitch *esw)
3180 struct mapping_ctx *reg_c0_obj_pool;
3181 struct mlx5_vport *vport;
3186 mutex_init(&esw->offloads.termtbl_mutex);
3187 mlx5_rdma_enable_roce(esw->dev);
3189 err = mlx5_esw_host_number_init(esw);
3193 err = esw_offloads_metadata_init(esw);
3197 err = esw_set_passing_vport_metadata(esw, true);
3199 goto err_vport_metadata;
3201 mapping_id = mlx5_query_nic_system_image_guid(esw->dev);
3203 reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
3204 sizeof(struct mlx5_mapped_obj),
3205 ESW_REG_C0_USER_DATA_METADATA_MASK,
3208 if (IS_ERR(reg_c0_obj_pool)) {
3209 err = PTR_ERR(reg_c0_obj_pool);
3212 esw->offloads.reg_c0_obj_pool = reg_c0_obj_pool;
3214 err = esw_offloads_steering_init(esw);
3216 goto err_steering_init;
3218 /* Representor will control the vport link state */
3219 mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
3220 vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN;
3222 /* Uplink vport rep must load first. */
3223 err = esw_offloads_load_rep(esw, MLX5_VPORT_UPLINK);
3227 err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE);
3231 esw_offloads_devcom_init(esw);
3236 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3238 esw_offloads_steering_cleanup(esw);
3240 mapping_destroy(reg_c0_obj_pool);
3242 esw_set_passing_vport_metadata(esw, false);
3244 esw_offloads_metadata_uninit(esw);
3246 mlx5_rdma_disable_roce(esw->dev);
3247 mutex_destroy(&esw->offloads.termtbl_mutex);
3251 static int esw_offloads_stop(struct mlx5_eswitch *esw,
3252 struct netlink_ext_ack *extack)
3256 mlx5_eswitch_disable_locked(esw, false);
3257 err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY,
3258 MLX5_ESWITCH_IGNORE_NUM_VFS);
3260 NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
3261 err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_OFFLOADS,
3262 MLX5_ESWITCH_IGNORE_NUM_VFS);
3264 NL_SET_ERR_MSG_MOD(extack,
3265 "Failed setting eswitch back to offloads");
3272 void esw_offloads_disable(struct mlx5_eswitch *esw)
3274 esw_offloads_devcom_cleanup(esw);
3275 mlx5_eswitch_disable_pf_vf_vports(esw);
3276 esw_offloads_unload_rep(esw, MLX5_VPORT_UPLINK);
3277 esw_set_passing_vport_metadata(esw, false);
3278 esw_offloads_steering_cleanup(esw);
3279 mapping_destroy(esw->offloads.reg_c0_obj_pool);
3280 esw_offloads_metadata_uninit(esw);
3281 mlx5_rdma_disable_roce(esw->dev);
3282 mutex_destroy(&esw->offloads.termtbl_mutex);
3285 static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
3288 case DEVLINK_ESWITCH_MODE_LEGACY:
3289 *mlx5_mode = MLX5_ESWITCH_LEGACY;
3291 case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3292 *mlx5_mode = MLX5_ESWITCH_OFFLOADS;
3301 static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
3303 switch (mlx5_mode) {
3304 case MLX5_ESWITCH_LEGACY:
3305 *mode = DEVLINK_ESWITCH_MODE_LEGACY;
3307 case MLX5_ESWITCH_OFFLOADS:
3308 *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
3317 static int esw_inline_mode_from_devlink(u8 mode, u8 *mlx5_mode)
3320 case DEVLINK_ESWITCH_INLINE_MODE_NONE:
3321 *mlx5_mode = MLX5_INLINE_MODE_NONE;
3323 case DEVLINK_ESWITCH_INLINE_MODE_LINK:
3324 *mlx5_mode = MLX5_INLINE_MODE_L2;
3326 case DEVLINK_ESWITCH_INLINE_MODE_NETWORK:
3327 *mlx5_mode = MLX5_INLINE_MODE_IP;
3329 case DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT:
3330 *mlx5_mode = MLX5_INLINE_MODE_TCP_UDP;
3339 static int esw_inline_mode_to_devlink(u8 mlx5_mode, u8 *mode)
3341 switch (mlx5_mode) {
3342 case MLX5_INLINE_MODE_NONE:
3343 *mode = DEVLINK_ESWITCH_INLINE_MODE_NONE;
3345 case MLX5_INLINE_MODE_L2:
3346 *mode = DEVLINK_ESWITCH_INLINE_MODE_LINK;
3348 case MLX5_INLINE_MODE_IP:
3349 *mode = DEVLINK_ESWITCH_INLINE_MODE_NETWORK;
3351 case MLX5_INLINE_MODE_TCP_UDP:
3352 *mode = DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT;
3361 static int eswitch_devlink_esw_mode_check(const struct mlx5_eswitch *esw)
3363 /* devlink commands in NONE eswitch mode are currently supported only on ECPF */
3366 return (esw->mode == MLX5_ESWITCH_NONE &&
3367 !mlx5_core_is_ecpf_esw_manager(esw->dev)) ? -EOPNOTSUPP : 0;
3370 int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
3371 struct netlink_ext_ack *extack)
3373 u16 cur_mlx5_mode, mlx5_mode = 0;
3374 struct mlx5_eswitch *esw;
3377 esw = mlx5_devlink_eswitch_get(devlink);
3379 return PTR_ERR(esw);
3381 if (esw_mode_from_devlink(mode, &mlx5_mode))
3384 mlx5_lag_disable_change(esw->dev);
3385 err = mlx5_esw_try_lock(esw);
3387 NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy");
3390 cur_mlx5_mode = err;
3393 if (cur_mlx5_mode == mlx5_mode)
3396 if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) {
3397 if (mlx5_devlink_trap_get_num_active(esw->dev)) {
3398 NL_SET_ERR_MSG_MOD(extack,
3399 "Can't change mode while devlink traps are active");
3403 err = esw_offloads_start(esw, extack);
3404 } else if (mode == DEVLINK_ESWITCH_MODE_LEGACY) {
3405 err = esw_offloads_stop(esw, extack);
3411 mlx5_esw_unlock(esw);
3413 mlx5_lag_enable_change(esw->dev);
3417 int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3419 struct mlx5_eswitch *esw;
3422 esw = mlx5_devlink_eswitch_get(devlink);
3424 return PTR_ERR(esw);
3426 down_write(&esw->mode_lock);
3427 err = eswitch_devlink_esw_mode_check(esw);
3431 err = esw_mode_to_devlink(esw->mode, mode);
3433 up_write(&esw->mode_lock);
3437 static int mlx5_esw_vports_inline_set(struct mlx5_eswitch *esw, u8 mlx5_mode,
3438 struct netlink_ext_ack *extack)
3440 struct mlx5_core_dev *dev = esw->dev;
3441 struct mlx5_vport *vport;
3442 u16 err_vport_num = 0;
3446 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3447 err = mlx5_modify_nic_vport_min_inline(dev, vport->vport, mlx5_mode);
3449 err_vport_num = vport->vport;
3450 NL_SET_ERR_MSG_MOD(extack,
3451 "Failed to set min inline on vport");
3452 goto revert_inline_mode;
3458 mlx5_esw_for_each_host_func_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
3459 if (vport->vport == err_vport_num)
3461 mlx5_modify_nic_vport_min_inline(dev,
3463 esw->offloads.inline_mode);
3468 int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode,
3469 struct netlink_ext_ack *extack)
3471 struct mlx5_core_dev *dev = devlink_priv(devlink);
3472 struct mlx5_eswitch *esw;
3476 esw = mlx5_devlink_eswitch_get(devlink);
3478 return PTR_ERR(esw);
3480 down_write(&esw->mode_lock);
3481 err = eswitch_devlink_esw_mode_check(esw);
3485 switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) {
3486 case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
3487 if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) {
3493 case MLX5_CAP_INLINE_MODE_L2:
3494 NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set");
3497 case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
3501 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3502 NL_SET_ERR_MSG_MOD(extack,
3503 "Can't set inline mode when flows are configured");
3508 err = esw_inline_mode_from_devlink(mode, &mlx5_mode);
3512 err = mlx5_esw_vports_inline_set(esw, mlx5_mode, extack);
3516 esw->offloads.inline_mode = mlx5_mode;
3517 up_write(&esw->mode_lock);
3521 up_write(&esw->mode_lock);
3525 int mlx5_devlink_eswitch_inline_mode_get(struct devlink *devlink, u8 *mode)
3527 struct mlx5_eswitch *esw;
3530 esw = mlx5_devlink_eswitch_get(devlink);
3532 return PTR_ERR(esw);
3534 down_write(&esw->mode_lock);
3535 err = eswitch_devlink_esw_mode_check(esw);
3539 err = esw_inline_mode_to_devlink(esw->offloads.inline_mode, mode);
3541 up_write(&esw->mode_lock);
3545 int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink,
3546 enum devlink_eswitch_encap_mode encap,
3547 struct netlink_ext_ack *extack)
3549 struct mlx5_core_dev *dev = devlink_priv(devlink);
3550 struct mlx5_eswitch *esw;
3553 esw = mlx5_devlink_eswitch_get(devlink);
3555 return PTR_ERR(esw);
3557 down_write(&esw->mode_lock);
3558 err = eswitch_devlink_esw_mode_check(esw);
3562 if (encap != DEVLINK_ESWITCH_ENCAP_MODE_NONE &&
3563 (!MLX5_CAP_ESW_FLOWTABLE_FDB(dev, reformat) ||
3564 !MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))) {
3569 if (encap && encap != DEVLINK_ESWITCH_ENCAP_MODE_BASIC) {
3574 if (esw->mode == MLX5_ESWITCH_LEGACY) {
3575 esw->offloads.encap = encap;
3579 if (esw->offloads.encap == encap)
3582 if (atomic64_read(&esw->offloads.num_flows) > 0) {
3583 NL_SET_ERR_MSG_MOD(extack,
3584 "Can't set encapsulation when flows are configured");
3589 esw_destroy_offloads_fdb_tables(esw);
3591 esw->offloads.encap = encap;
3593 err = esw_create_offloads_fdb_tables(esw);
3596 NL_SET_ERR_MSG_MOD(extack,
3597 "Failed re-creating fast FDB table");
3598 esw->offloads.encap = !encap;
3599 (void)esw_create_offloads_fdb_tables(esw);
3603 up_write(&esw->mode_lock);
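/* Note: the encap capability is baked into the offloads FDB at table
 * creation time, which is why changing it requires destroying and
 * re-creating the FDB tables above rather than flipping a runtime flag.
 * The (void)esw_create_offloads_fdb_tables() call is a best-effort
 * rollback that restores the previous encap value when re-creation
 * fails.
 */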
3607 int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink,
3608 enum devlink_eswitch_encap_mode *encap)
3610 struct mlx5_eswitch *esw;
3613 esw = mlx5_devlink_eswitch_get(devlink);
3615 return PTR_ERR(esw);
3618 down_write(&esw->mode_lock);
3619 err = eswitch_devlink_esw_mode_check(esw);
3623 *encap = esw->offloads.encap;
3625 up_write(&esw->mode_lock);
3630 mlx5_eswitch_vport_has_rep(const struct mlx5_eswitch *esw, u16 vport_num)
3632 /* Currently, only an ECPF-based device has a representor for the host PF. */
3633 if (vport_num == MLX5_VPORT_PF &&
3634 !mlx5_core_is_ecpf_esw_manager(esw->dev))
3637 if (vport_num == MLX5_VPORT_ECPF &&
3638 !mlx5_ecpf_vport_exists(esw->dev))
3644 void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw,
3645 const struct mlx5_eswitch_rep_ops *ops,
3648 struct mlx5_eswitch_rep_data *rep_data;
3649 struct mlx5_eswitch_rep *rep;
3652 esw->offloads.rep_ops[rep_type] = ops;
3653 mlx5_esw_for_each_rep(esw, i, rep) {
3654 if (likely(mlx5_eswitch_vport_has_rep(esw, rep->vport))) {
3656 rep_data = &rep->rep_data[rep_type];
3657 atomic_set(&rep_data->state, REP_REGISTERED);
3661 EXPORT_SYMBOL(mlx5_eswitch_register_vport_reps);
3663 void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type)
3665 struct mlx5_eswitch_rep *rep;
3668 if (esw->mode == MLX5_ESWITCH_OFFLOADS)
3669 __unload_reps_all_vport(esw, rep_type);
3671 mlx5_esw_for_each_rep(esw, i, rep)
3672 atomic_set(&rep->rep_data[rep_type].state, REP_UNREGISTERED);
3674 EXPORT_SYMBOL(mlx5_eswitch_unregister_vport_reps);
3676 void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type)
3678 struct mlx5_eswitch_rep *rep;
3680 rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK);
3681 return rep->rep_data[rep_type].priv;
3684 void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw,
3688 struct mlx5_eswitch_rep *rep;
3690 rep = mlx5_eswitch_get_rep(esw, vport);
3692 if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED &&
3693 esw->offloads.rep_ops[rep_type]->get_proto_dev)
3694 return esw->offloads.rep_ops[rep_type]->get_proto_dev(rep);
3697 EXPORT_SYMBOL(mlx5_eswitch_get_proto_dev);
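/* A hypothetical caller sketch for the exported helper above; REP_ETH
 * is chosen purely for illustration, and the returned pointer type
 * depends on whoever registered the rep ops:
 *
 *     void *priv = mlx5_eswitch_get_proto_dev(esw, vport_num, REP_ETH);
 *
 * A NULL return means the rep is not loaded, or the registered ops
 * provide no get_proto_dev callback.
 */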
3699 void *mlx5_eswitch_uplink_get_proto_dev(struct mlx5_eswitch *esw, u8 rep_type)
3701 return mlx5_eswitch_get_proto_dev(esw, MLX5_VPORT_UPLINK, rep_type);
3703 EXPORT_SYMBOL(mlx5_eswitch_uplink_get_proto_dev);
3705 struct mlx5_eswitch_rep *mlx5_eswitch_vport_rep(struct mlx5_eswitch *esw,
3708 return mlx5_eswitch_get_rep(esw, vport);
3710 EXPORT_SYMBOL(mlx5_eswitch_vport_rep);
3712 bool mlx5_eswitch_reg_c1_loopback_enabled(const struct mlx5_eswitch *esw)
3714 return !!(esw->flags & MLX5_ESWITCH_REG_C1_LOOPBACK_ENABLED);
3716 EXPORT_SYMBOL(mlx5_eswitch_reg_c1_loopback_enabled);
3718 bool mlx5_eswitch_vport_match_metadata_enabled(const struct mlx5_eswitch *esw)
3720 return !!(esw->flags & MLX5_ESWITCH_VPORT_MATCH_METADATA);
3722 EXPORT_SYMBOL(mlx5_eswitch_vport_match_metadata_enabled);
3724 u32 mlx5_eswitch_get_vport_metadata_for_match(struct mlx5_eswitch *esw,
3727 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3729 if (WARN_ON_ONCE(IS_ERR(vport)))
3732 return vport->metadata << (32 - ESW_SOURCE_PORT_METADATA_BITS);
3734 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_match);
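/* Continuing the 0x2005 example from mlx5_esw_match_metadata_alloc():
 * with ESW_SOURCE_PORT_METADATA_BITS covering the 4 + 12 = 16 metadata
 * bits, the match value returned above is the metadata shifted into the
 * upper half of reg_c_0, i.e. 0x2005 << 16 == 0x20050000.
 */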
3736 int mlx5_esw_offloads_sf_vport_enable(struct mlx5_eswitch *esw, struct devlink_port *dl_port,
3737 u16 vport_num, u32 controller, u32 sfnum)
3741 err = mlx5_esw_vport_enable(esw, vport_num, MLX5_VPORT_UC_ADDR_CHANGE);
3745 err = mlx5_esw_devlink_sf_port_register(esw, dl_port, vport_num, controller, sfnum);
3749 err = mlx5_esw_offloads_rep_load(esw, vport_num);
3755 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3757 mlx5_esw_vport_disable(esw, vport_num);
3761 void mlx5_esw_offloads_sf_vport_disable(struct mlx5_eswitch *esw, u16 vport_num)
3763 mlx5_esw_offloads_rep_unload(esw, vport_num);
3764 mlx5_esw_devlink_sf_port_unregister(esw, vport_num);
3765 mlx5_esw_vport_disable(esw, vport_num);
3768 static int mlx5_esw_query_vport_vhca_id(struct mlx5_eswitch *esw, u16 vport_num, u16 *vhca_id)
3770 int query_out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
3776 if (mlx5_esw_is_manager_vport(esw, vport_num) ||
3777 !MLX5_CAP_GEN(esw->dev, vhca_resource_manager))
3780 query_ctx = kzalloc(query_out_sz, GFP_KERNEL);
3784 err = mlx5_vport_get_other_func_cap(esw->dev, vport_num, query_ctx);
3788 hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
3789 *vhca_id = MLX5_GET(cmd_hca_cap, hca_caps, vhca_id);
3796 int mlx5_esw_vport_vhca_id_set(struct mlx5_eswitch *esw, u16 vport_num)
3798 u16 *old_entry, *vhca_map_entry, vhca_id;
3801 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3803 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
3808 vhca_map_entry = kmalloc(sizeof(*vhca_map_entry), GFP_KERNEL);
3809 if (!vhca_map_entry)
3812 *vhca_map_entry = vport_num;
3813 old_entry = xa_store(&esw->offloads.vhca_map, vhca_id, vhca_map_entry, GFP_KERNEL);
3814 if (xa_is_err(old_entry)) {
3815 kfree(vhca_map_entry);
3816 return xa_err(old_entry);
3822 void mlx5_esw_vport_vhca_id_clear(struct mlx5_eswitch *esw, u16 vport_num)
3824 u16 *vhca_map_entry, vhca_id;
3827 err = mlx5_esw_query_vport_vhca_id(esw, vport_num, &vhca_id);
3829 esw_warn(esw->dev, "Getting vhca_id for vport failed (vport=%u,err=%d)\n",
3832 vhca_map_entry = xa_erase(&esw->offloads.vhca_map, vhca_id);
3833 kfree(vhca_map_entry);
3836 int mlx5_eswitch_vhca_id_to_vport(struct mlx5_eswitch *esw, u16 vhca_id, u16 *vport_num)
3838 u16 *res = xa_load(&esw->offloads.vhca_map, vhca_id);
3847 u32 mlx5_eswitch_get_vport_metadata_for_set(struct mlx5_eswitch *esw,
3850 struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
3852 if (WARN_ON_ONCE(IS_ERR(vport)))
3855 return vport->metadata;
3857 EXPORT_SYMBOL(mlx5_eswitch_get_vport_metadata_for_set);
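/* Note: the "for_set" variant above returns the raw metadata value that
 * gets programmed into reg_c_0 (typically by the vport ingress ACL),
 * while the "for_match" variant returns the same value pre-shifted into
 * the register bits that match criteria compare against.
 */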