1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
2 /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */
4 #include "lib/devcom.h"
7 #include "bridge_priv.h"
8 #include "diag/bridge_tracepoint.h"
10 static const struct rhashtable_params mdb_ht_params = {
11 .key_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, key),
12 .key_len = sizeof(struct mlx5_esw_bridge_mdb_key),
13 .head_offset = offsetof(struct mlx5_esw_bridge_mdb_entry, ht_node),
14 .automatic_shrinking = true,
17 int mlx5_esw_bridge_mdb_init(struct mlx5_esw_bridge *bridge)
19 INIT_LIST_HEAD(&bridge->mdb_list);
20 return rhashtable_init(&bridge->mdb_ht, &mdb_ht_params);
23 void mlx5_esw_bridge_mdb_cleanup(struct mlx5_esw_bridge *bridge)
25 rhashtable_destroy(&bridge->mdb_ht);
28 static struct mlx5_esw_bridge_port *
29 mlx5_esw_bridge_mdb_port_lookup(struct mlx5_esw_bridge_port *port,
30 struct mlx5_esw_bridge_mdb_entry *entry)
32 return xa_load(&entry->ports, mlx5_esw_bridge_port_key(port));
35 static int mlx5_esw_bridge_mdb_port_insert(struct mlx5_esw_bridge_port *port,
36 struct mlx5_esw_bridge_mdb_entry *entry)
38 int err = xa_insert(&entry->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL);
45 static void mlx5_esw_bridge_mdb_port_remove(struct mlx5_esw_bridge_port *port,
46 struct mlx5_esw_bridge_mdb_entry *entry)
48 xa_erase(&entry->ports, mlx5_esw_bridge_port_key(port));
52 static struct mlx5_flow_handle *
53 mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_mdb_entry *entry,
54 struct mlx5_esw_bridge *bridge)
56 struct mlx5_flow_act flow_act = {
57 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
58 .flags = FLOW_ACT_NO_APPEND | FLOW_ACT_IGNORE_FLOW_LEVEL,
60 int num_dests = entry->num_ports, i = 0;
61 struct mlx5_flow_destination *dests;
62 struct mlx5_esw_bridge_port *port;
63 struct mlx5_flow_spec *rule_spec;
64 struct mlx5_flow_handle *handle;
68 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
70 return ERR_PTR(-ENOMEM);
72 dests = kvcalloc(num_dests, sizeof(*dests), GFP_KERNEL);
75 return ERR_PTR(-ENOMEM);
78 xa_for_each(&entry->ports, idx, port) {
79 dests[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
80 dests[i].ft = port->mcast.ft;
81 if (port->vport_num == MLX5_VPORT_UPLINK)
82 dests[i].ft->flags |= MLX5_FLOW_TABLE_UPLINK_VPORT;
86 rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
87 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
88 dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
89 ether_addr_copy(dmac_v, entry->key.addr);
90 dmac_c = MLX5_ADDR_OF(fte_match_param, rule_spec->match_criteria, outer_headers.dmac_47_16);
91 eth_broadcast_addr(dmac_c);
94 if (bridge->vlan_proto == ETH_P_8021Q) {
95 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
96 outer_headers.cvlan_tag);
97 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
98 outer_headers.cvlan_tag);
99 } else if (bridge->vlan_proto == ETH_P_8021AD) {
100 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
101 outer_headers.svlan_tag);
102 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
103 outer_headers.svlan_tag);
105 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
106 outer_headers.first_vid);
107 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid,
111 handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, dests, num_dests);
119 mlx5_esw_bridge_port_mdb_offload(struct mlx5_esw_bridge_port *port,
120 struct mlx5_esw_bridge_mdb_entry *entry)
122 struct mlx5_flow_handle *handle;
124 handle = mlx5_esw_bridge_mdb_flow_create(port->esw_owner_vhca_id, entry, port->bridge);
125 if (entry->egress_handle) {
126 mlx5_del_flow_rules(entry->egress_handle);
127 entry->egress_handle = NULL;
130 return PTR_ERR(handle);
132 entry->egress_handle = handle;
136 static struct mlx5_esw_bridge_mdb_entry *
137 mlx5_esw_bridge_mdb_lookup(struct mlx5_esw_bridge *bridge,
138 const unsigned char *addr, u16 vid)
140 struct mlx5_esw_bridge_mdb_key key = {};
142 ether_addr_copy(key.addr, addr);
144 return rhashtable_lookup_fast(&bridge->mdb_ht, &key, mdb_ht_params);
147 static struct mlx5_esw_bridge_mdb_entry *
148 mlx5_esw_bridge_port_mdb_entry_init(struct mlx5_esw_bridge_port *port,
149 const unsigned char *addr, u16 vid)
151 struct mlx5_esw_bridge *bridge = port->bridge;
152 struct mlx5_esw_bridge_mdb_entry *entry;
155 entry = kvzalloc(sizeof(*entry), GFP_KERNEL);
157 return ERR_PTR(-ENOMEM);
159 ether_addr_copy(entry->key.addr, addr);
160 entry->key.vid = vid;
161 xa_init(&entry->ports);
162 err = rhashtable_insert_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
166 list_add(&entry->list, &bridge->mdb_list);
171 xa_destroy(&entry->ports);
176 static void mlx5_esw_bridge_port_mdb_entry_cleanup(struct mlx5_esw_bridge *bridge,
177 struct mlx5_esw_bridge_mdb_entry *entry)
179 if (entry->egress_handle)
180 mlx5_del_flow_rules(entry->egress_handle);
181 list_del(&entry->list);
182 rhashtable_remove_fast(&bridge->mdb_ht, &entry->ht_node, mdb_ht_params);
183 xa_destroy(&entry->ports);
187 int mlx5_esw_bridge_port_mdb_attach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
188 const unsigned char *addr, u16 vid)
190 struct mlx5_esw_bridge *bridge = port->bridge;
191 struct mlx5_esw_bridge_mdb_entry *entry;
194 if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
197 entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
199 if (mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
200 esw_warn(bridge->br_offloads->esw->dev, "MDB attach entry is already attached to port (MAC=%pM,vid=%u,vport=%u)\n",
201 addr, vid, port->vport_num);
205 entry = mlx5_esw_bridge_port_mdb_entry_init(port, addr, vid);
207 err = PTR_ERR(entry);
208 esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to init entry (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
209 addr, vid, port->vport_num, err);
214 err = mlx5_esw_bridge_mdb_port_insert(port, entry);
216 if (!entry->num_ports)
217 mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry); /* new mdb entry */
218 esw_warn(bridge->br_offloads->esw->dev,
219 "MDB attach failed to insert port (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
220 addr, vid, port->vport_num, err);
224 err = mlx5_esw_bridge_port_mdb_offload(port, entry);
226 /* Single mdb can be used by multiple ports, so just log the
227 * error and continue.
229 esw_warn(bridge->br_offloads->esw->dev, "MDB attach failed to offload (MAC=%pM,vid=%u,vport=%u,err=%d)\n",
230 addr, vid, port->vport_num, err);
232 trace_mlx5_esw_bridge_port_mdb_attach(dev, entry);
236 static void mlx5_esw_bridge_port_mdb_entry_detach(struct mlx5_esw_bridge_port *port,
237 struct mlx5_esw_bridge_mdb_entry *entry)
239 struct mlx5_esw_bridge *bridge = port->bridge;
242 mlx5_esw_bridge_mdb_port_remove(port, entry);
243 if (!entry->num_ports) {
244 mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
248 err = mlx5_esw_bridge_port_mdb_offload(port, entry);
250 /* Single mdb can be used by multiple ports, so just log the
251 * error and continue.
253 esw_warn(bridge->br_offloads->esw->dev, "MDB detach failed to offload (MAC=%pM,vid=%u,vport=%u)\n",
254 entry->key.addr, entry->key.vid, port->vport_num);
257 void mlx5_esw_bridge_port_mdb_detach(struct net_device *dev, struct mlx5_esw_bridge_port *port,
258 const unsigned char *addr, u16 vid)
260 struct mlx5_esw_bridge *bridge = port->bridge;
261 struct mlx5_esw_bridge_mdb_entry *entry;
263 entry = mlx5_esw_bridge_mdb_lookup(bridge, addr, vid);
265 esw_debug(bridge->br_offloads->esw->dev,
266 "MDB detach entry not found (MAC=%pM,vid=%u,vport=%u)\n",
267 addr, vid, port->vport_num);
271 if (!mlx5_esw_bridge_mdb_port_lookup(port, entry)) {
272 esw_debug(bridge->br_offloads->esw->dev,
273 "MDB detach entry not attached to the port (MAC=%pM,vid=%u,vport=%u)\n",
274 addr, vid, port->vport_num);
278 trace_mlx5_esw_bridge_port_mdb_detach(dev, entry);
279 mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
282 void mlx5_esw_bridge_port_mdb_vlan_flush(struct mlx5_esw_bridge_port *port,
283 struct mlx5_esw_bridge_vlan *vlan)
285 struct mlx5_esw_bridge *bridge = port->bridge;
286 struct mlx5_esw_bridge_mdb_entry *entry, *tmp;
288 list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
289 if (entry->key.vid == vlan->vid && mlx5_esw_bridge_mdb_port_lookup(port, entry))
290 mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
293 static void mlx5_esw_bridge_port_mdb_flush(struct mlx5_esw_bridge_port *port)
295 struct mlx5_esw_bridge *bridge = port->bridge;
296 struct mlx5_esw_bridge_mdb_entry *entry, *tmp;
298 list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
299 if (mlx5_esw_bridge_mdb_port_lookup(port, entry))
300 mlx5_esw_bridge_port_mdb_entry_detach(port, entry);
303 void mlx5_esw_bridge_mdb_flush(struct mlx5_esw_bridge *bridge)
305 struct mlx5_esw_bridge_mdb_entry *entry, *tmp;
307 list_for_each_entry_safe(entry, tmp, &bridge->mdb_list, list)
308 mlx5_esw_bridge_port_mdb_entry_cleanup(bridge, entry);
310 static int mlx5_esw_bridge_port_mcast_fts_init(struct mlx5_esw_bridge_port *port,
311 struct mlx5_esw_bridge *bridge)
313 struct mlx5_eswitch *esw = bridge->br_offloads->esw;
314 struct mlx5_flow_table *mcast_ft;
316 mcast_ft = mlx5_esw_bridge_table_create(MLX5_ESW_BRIDGE_MCAST_TABLE_SIZE,
317 MLX5_ESW_BRIDGE_LEVEL_MCAST_TABLE,
319 if (IS_ERR(mcast_ft))
320 return PTR_ERR(mcast_ft);
322 port->mcast.ft = mcast_ft;
326 static void mlx5_esw_bridge_port_mcast_fts_cleanup(struct mlx5_esw_bridge_port *port)
329 mlx5_destroy_flow_table(port->mcast.ft);
330 port->mcast.ft = NULL;
333 static struct mlx5_flow_group *
334 mlx5_esw_bridge_mcast_filter_fg_create(struct mlx5_eswitch *esw,
335 struct mlx5_flow_table *mcast_ft)
337 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
338 struct mlx5_flow_group *fg;
341 in = kvzalloc(inlen, GFP_KERNEL);
343 return ERR_PTR(-ENOMEM);
345 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_MISC_PARAMETERS_2);
346 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
348 MLX5_SET(fte_match_param, match, misc_parameters_2.metadata_reg_c_0,
349 mlx5_eswitch_get_vport_metadata_mask());
351 MLX5_SET(create_flow_group_in, in, start_flow_index,
352 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_FROM);
353 MLX5_SET(create_flow_group_in, in, end_flow_index,
354 MLX5_ESW_BRIDGE_MCAST_TABLE_FILTER_GRP_IDX_TO);
356 fg = mlx5_create_flow_group(mcast_ft, in);
360 "Failed to create filter flow group for bridge mcast table (err=%pe)\n",
366 static struct mlx5_flow_group *
367 mlx5_esw_bridge_mcast_vlan_proto_fg_create(unsigned int from, unsigned int to, u16 vlan_proto,
368 struct mlx5_eswitch *esw,
369 struct mlx5_flow_table *mcast_ft)
371 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
372 struct mlx5_flow_group *fg;
375 in = kvzalloc(inlen, GFP_KERNEL);
377 return ERR_PTR(-ENOMEM);
379 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
380 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
382 if (vlan_proto == ETH_P_8021Q)
383 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.cvlan_tag);
384 else if (vlan_proto == ETH_P_8021AD)
385 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.svlan_tag);
386 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.first_vid);
388 MLX5_SET(create_flow_group_in, in, start_flow_index, from);
389 MLX5_SET(create_flow_group_in, in, end_flow_index, to);
391 fg = mlx5_create_flow_group(mcast_ft, in);
395 "Failed to create VLAN(proto=%x) flow group for bridge mcast table (err=%pe)\n",
401 static struct mlx5_flow_group *
402 mlx5_esw_bridge_mcast_vlan_fg_create(struct mlx5_eswitch *esw, struct mlx5_flow_table *mcast_ft)
404 unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_FROM;
405 unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_VLAN_GRP_IDX_TO;
407 return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021Q, esw, mcast_ft);
410 static struct mlx5_flow_group *
411 mlx5_esw_bridge_mcast_qinq_fg_create(struct mlx5_eswitch *esw,
412 struct mlx5_flow_table *mcast_ft)
414 unsigned int from = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_FROM;
415 unsigned int to = MLX5_ESW_BRIDGE_MCAST_TABLE_QINQ_GRP_IDX_TO;
417 return mlx5_esw_bridge_mcast_vlan_proto_fg_create(from, to, ETH_P_8021AD, esw, mcast_ft);
420 static struct mlx5_flow_group *
421 mlx5_esw_bridge_mcast_fwd_fg_create(struct mlx5_eswitch *esw,
422 struct mlx5_flow_table *mcast_ft)
424 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
425 struct mlx5_flow_group *fg;
428 in = kvzalloc(inlen, GFP_KERNEL);
430 return ERR_PTR(-ENOMEM);
432 MLX5_SET(create_flow_group_in, in, start_flow_index,
433 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_FROM);
434 MLX5_SET(create_flow_group_in, in, end_flow_index,
435 MLX5_ESW_BRIDGE_MCAST_TABLE_FWD_GRP_IDX_TO);
437 fg = mlx5_create_flow_group(mcast_ft, in);
441 "Failed to create forward flow group for bridge mcast table (err=%pe)\n",
447 static int mlx5_esw_bridge_port_mcast_fgs_init(struct mlx5_esw_bridge_port *port)
449 struct mlx5_flow_group *fwd_fg, *qinq_fg, *vlan_fg, *filter_fg;
450 struct mlx5_eswitch *esw = port->bridge->br_offloads->esw;
451 struct mlx5_flow_table *mcast_ft = port->mcast.ft;
454 filter_fg = mlx5_esw_bridge_mcast_filter_fg_create(esw, mcast_ft);
455 if (IS_ERR(filter_fg))
456 return PTR_ERR(filter_fg);
458 vlan_fg = mlx5_esw_bridge_mcast_vlan_fg_create(esw, mcast_ft);
459 if (IS_ERR(vlan_fg)) {
460 err = PTR_ERR(vlan_fg);
464 qinq_fg = mlx5_esw_bridge_mcast_qinq_fg_create(esw, mcast_ft);
465 if (IS_ERR(qinq_fg)) {
466 err = PTR_ERR(qinq_fg);
470 fwd_fg = mlx5_esw_bridge_mcast_fwd_fg_create(esw, mcast_ft);
471 if (IS_ERR(fwd_fg)) {
472 err = PTR_ERR(fwd_fg);
476 port->mcast.filter_fg = filter_fg;
477 port->mcast.vlan_fg = vlan_fg;
478 port->mcast.qinq_fg = qinq_fg;
479 port->mcast.fwd_fg = fwd_fg;
484 mlx5_destroy_flow_group(qinq_fg);
486 mlx5_destroy_flow_group(vlan_fg);
488 mlx5_destroy_flow_group(filter_fg);
492 static void mlx5_esw_bridge_port_mcast_fgs_cleanup(struct mlx5_esw_bridge_port *port)
494 if (port->mcast.fwd_fg)
495 mlx5_destroy_flow_group(port->mcast.fwd_fg);
496 port->mcast.fwd_fg = NULL;
497 if (port->mcast.qinq_fg)
498 mlx5_destroy_flow_group(port->mcast.qinq_fg);
499 port->mcast.qinq_fg = NULL;
500 if (port->mcast.vlan_fg)
501 mlx5_destroy_flow_group(port->mcast.vlan_fg);
502 port->mcast.vlan_fg = NULL;
503 if (port->mcast.filter_fg)
504 mlx5_destroy_flow_group(port->mcast.filter_fg);
505 port->mcast.filter_fg = NULL;
508 static struct mlx5_flow_handle *
509 mlx5_esw_bridge_mcast_flow_with_esw_create(struct mlx5_esw_bridge_port *port,
510 struct mlx5_eswitch *esw)
512 struct mlx5_flow_act flow_act = {
513 .action = MLX5_FLOW_CONTEXT_ACTION_DROP,
514 .flags = FLOW_ACT_NO_APPEND,
516 struct mlx5_flow_spec *rule_spec;
517 struct mlx5_flow_handle *handle;
519 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
521 return ERR_PTR(-ENOMEM);
523 rule_spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS_2;
525 MLX5_SET(fte_match_param, rule_spec->match_criteria,
526 misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask());
527 MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0,
528 mlx5_eswitch_get_vport_metadata_for_match(esw, port->vport_num));
530 handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, NULL, 0);
536 static struct mlx5_flow_handle *
537 mlx5_esw_bridge_mcast_filter_flow_create(struct mlx5_esw_bridge_port *port)
539 return mlx5_esw_bridge_mcast_flow_with_esw_create(port, port->bridge->br_offloads->esw);
542 static struct mlx5_flow_handle *
543 mlx5_esw_bridge_mcast_filter_flow_peer_create(struct mlx5_esw_bridge_port *port)
545 struct mlx5_devcom_comp_dev *devcom = port->bridge->br_offloads->esw->devcom, *pos;
546 struct mlx5_eswitch *tmp, *peer_esw = NULL;
547 static struct mlx5_flow_handle *handle;
549 if (!mlx5_devcom_for_each_peer_begin(devcom))
550 return ERR_PTR(-ENODEV);
552 mlx5_devcom_for_each_peer_entry(devcom, tmp, pos) {
553 if (mlx5_esw_is_owner(tmp, port->vport_num, port->esw_owner_vhca_id)) {
560 handle = ERR_PTR(-ENODEV);
564 handle = mlx5_esw_bridge_mcast_flow_with_esw_create(port, peer_esw);
567 mlx5_devcom_for_each_peer_end(devcom);
571 static struct mlx5_flow_handle *
572 mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
573 struct mlx5_esw_bridge_vlan *vlan)
575 struct mlx5_flow_act flow_act = {
576 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
577 .flags = FLOW_ACT_NO_APPEND,
579 struct mlx5_flow_destination dest = {
580 .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
581 .vport.num = port->vport_num,
583 struct mlx5_esw_bridge *bridge = port->bridge;
584 struct mlx5_flow_spec *rule_spec;
585 struct mlx5_flow_handle *handle;
587 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
589 return ERR_PTR(-ENOMEM);
591 rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
592 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
594 flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
595 flow_act.pkt_reformat = vlan->pkt_reformat_pop;
597 if (vlan_proto == ETH_P_8021Q) {
598 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
599 outer_headers.cvlan_tag);
600 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
601 outer_headers.cvlan_tag);
602 } else if (vlan_proto == ETH_P_8021AD) {
603 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria,
604 outer_headers.svlan_tag);
605 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_value,
606 outer_headers.svlan_tag);
608 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.first_vid);
609 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.first_vid, vlan->vid);
611 if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
612 dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
613 dest.vport.vhca_id = port->esw_owner_vhca_id;
615 handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
621 int mlx5_esw_bridge_vlan_mcast_init(u16 vlan_proto, struct mlx5_esw_bridge_port *port,
622 struct mlx5_esw_bridge_vlan *vlan)
624 struct mlx5_flow_handle *handle;
626 if (!(port->bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
629 handle = mlx5_esw_bridge_mcast_vlan_flow_create(vlan_proto, port, vlan);
631 return PTR_ERR(handle);
633 vlan->mcast_handle = handle;
637 void mlx5_esw_bridge_vlan_mcast_cleanup(struct mlx5_esw_bridge_vlan *vlan)
639 if (vlan->mcast_handle)
640 mlx5_del_flow_rules(vlan->mcast_handle);
641 vlan->mcast_handle = NULL;
644 static struct mlx5_flow_handle *
645 mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
647 struct mlx5_flow_act flow_act = {
648 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
649 .flags = FLOW_ACT_NO_APPEND,
651 struct mlx5_flow_destination dest = {
652 .type = MLX5_FLOW_DESTINATION_TYPE_VPORT,
653 .vport.num = port->vport_num,
655 struct mlx5_esw_bridge *bridge = port->bridge;
656 struct mlx5_flow_spec *rule_spec;
657 struct mlx5_flow_handle *handle;
659 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
661 return ERR_PTR(-ENOMEM);
663 if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) {
664 dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
665 dest.vport.vhca_id = port->esw_owner_vhca_id;
667 rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
668 handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
674 static int mlx5_esw_bridge_port_mcast_fhs_init(struct mlx5_esw_bridge_port *port)
676 struct mlx5_flow_handle *filter_handle, *fwd_handle;
677 struct mlx5_esw_bridge_vlan *vlan, *failed;
682 filter_handle = (port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) ?
683 mlx5_esw_bridge_mcast_filter_flow_peer_create(port) :
684 mlx5_esw_bridge_mcast_filter_flow_create(port);
685 if (IS_ERR(filter_handle))
686 return PTR_ERR(filter_handle);
688 fwd_handle = mlx5_esw_bridge_mcast_fwd_flow_create(port);
689 if (IS_ERR(fwd_handle)) {
690 err = PTR_ERR(fwd_handle);
694 xa_for_each(&port->vlans, index, vlan) {
695 err = mlx5_esw_bridge_vlan_mcast_init(port->bridge->vlan_proto, port, vlan);
702 port->mcast.filter_handle = filter_handle;
703 port->mcast.fwd_handle = fwd_handle;
708 xa_for_each(&port->vlans, index, vlan) {
712 mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
714 mlx5_del_flow_rules(fwd_handle);
716 mlx5_del_flow_rules(filter_handle);
720 static void mlx5_esw_bridge_port_mcast_fhs_cleanup(struct mlx5_esw_bridge_port *port)
722 struct mlx5_esw_bridge_vlan *vlan;
725 xa_for_each(&port->vlans, index, vlan)
726 mlx5_esw_bridge_vlan_mcast_cleanup(vlan);
728 if (port->mcast.fwd_handle)
729 mlx5_del_flow_rules(port->mcast.fwd_handle);
730 port->mcast.fwd_handle = NULL;
731 if (port->mcast.filter_handle)
732 mlx5_del_flow_rules(port->mcast.filter_handle);
733 port->mcast.filter_handle = NULL;
736 int mlx5_esw_bridge_port_mcast_init(struct mlx5_esw_bridge_port *port)
738 struct mlx5_esw_bridge *bridge = port->bridge;
741 if (!(bridge->flags & MLX5_ESW_BRIDGE_MCAST_FLAG))
744 err = mlx5_esw_bridge_port_mcast_fts_init(port, bridge);
748 err = mlx5_esw_bridge_port_mcast_fgs_init(port);
752 err = mlx5_esw_bridge_port_mcast_fhs_init(port);
758 mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
760 mlx5_esw_bridge_port_mcast_fts_cleanup(port);
/* Tear down per-port multicast offload in reverse order of init, after
 * detaching the port from all MDB entries.
 */
void mlx5_esw_bridge_port_mcast_cleanup(struct mlx5_esw_bridge_port *port)
{
	mlx5_esw_bridge_port_mdb_flush(port);
	mlx5_esw_bridge_port_mcast_fhs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fgs_cleanup(port);
	mlx5_esw_bridge_port_mcast_fts_cleanup(port);
}
772 static struct mlx5_flow_group *
773 mlx5_esw_bridge_ingress_igmp_fg_create(struct mlx5_eswitch *esw,
774 struct mlx5_flow_table *ingress_ft)
776 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
777 struct mlx5_flow_group *fg;
780 in = kvzalloc(inlen, GFP_KERNEL);
782 return ERR_PTR(-ENOMEM);
784 MLX5_SET(create_flow_group_in, in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
785 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
787 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
788 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_protocol);
790 MLX5_SET(create_flow_group_in, in, start_flow_index,
791 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_FROM);
792 MLX5_SET(create_flow_group_in, in, end_flow_index,
793 MLX5_ESW_BRIDGE_INGRESS_TABLE_IGMP_GRP_IDX_TO);
795 fg = mlx5_create_flow_group(ingress_ft, in);
799 "Failed to create IGMP flow group for bridge ingress table (err=%pe)\n",
805 static struct mlx5_flow_group *
806 mlx5_esw_bridge_ingress_mld_fg_create(struct mlx5_eswitch *esw,
807 struct mlx5_flow_table *ingress_ft)
809 int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
810 struct mlx5_flow_group *fg;
813 if (!(MLX5_CAP_GEN(esw->dev, flex_parser_protocols) & MLX5_FLEX_PROTO_ICMPV6)) {
815 "Can't create MLD flow group due to missing hardware ICMPv6 parsing support\n");
819 in = kvzalloc(inlen, GFP_KERNEL);
821 return ERR_PTR(-ENOMEM);
823 MLX5_SET(create_flow_group_in, in, match_criteria_enable,
824 MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3);
825 match = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
827 MLX5_SET_TO_ONES(fte_match_param, match, outer_headers.ip_version);
828 MLX5_SET_TO_ONES(fte_match_param, match, misc_parameters_3.icmpv6_type);
830 MLX5_SET(create_flow_group_in, in, start_flow_index,
831 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_FROM);
832 MLX5_SET(create_flow_group_in, in, end_flow_index,
833 MLX5_ESW_BRIDGE_INGRESS_TABLE_MLD_GRP_IDX_TO);
835 fg = mlx5_create_flow_group(ingress_ft, in);
839 "Failed to create MLD flow group for bridge ingress table (err=%pe)\n",
846 mlx5_esw_bridge_ingress_mcast_fgs_init(struct mlx5_esw_bridge_offloads *br_offloads)
848 struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft;
849 struct mlx5_eswitch *esw = br_offloads->esw;
850 struct mlx5_flow_group *igmp_fg, *mld_fg;
852 igmp_fg = mlx5_esw_bridge_ingress_igmp_fg_create(esw, ingress_ft);
854 return PTR_ERR(igmp_fg);
856 mld_fg = mlx5_esw_bridge_ingress_mld_fg_create(esw, ingress_ft);
857 if (IS_ERR(mld_fg)) {
858 mlx5_destroy_flow_group(igmp_fg);
859 return PTR_ERR(mld_fg);
862 br_offloads->ingress_igmp_fg = igmp_fg;
863 br_offloads->ingress_mld_fg = mld_fg;
868 mlx5_esw_bridge_ingress_mcast_fgs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
870 if (br_offloads->ingress_mld_fg)
871 mlx5_destroy_flow_group(br_offloads->ingress_mld_fg);
872 br_offloads->ingress_mld_fg = NULL;
873 if (br_offloads->ingress_igmp_fg)
874 mlx5_destroy_flow_group(br_offloads->ingress_igmp_fg);
875 br_offloads->ingress_igmp_fg = NULL;
878 static struct mlx5_flow_handle *
879 mlx5_esw_bridge_ingress_igmp_fh_create(struct mlx5_flow_table *ingress_ft,
880 struct mlx5_flow_table *skip_ft)
882 struct mlx5_flow_destination dest = {
883 .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
886 struct mlx5_flow_act flow_act = {
887 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
888 .flags = FLOW_ACT_NO_APPEND,
890 struct mlx5_flow_spec *rule_spec;
891 struct mlx5_flow_handle *handle;
893 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
895 return ERR_PTR(-ENOMEM);
897 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
899 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
900 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 4);
901 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_protocol);
902 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_protocol, IPPROTO_IGMP);
904 handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);
910 static struct mlx5_flow_handle *
911 mlx5_esw_bridge_ingress_mld_fh_create(u8 type, struct mlx5_flow_table *ingress_ft,
912 struct mlx5_flow_table *skip_ft)
914 struct mlx5_flow_destination dest = {
915 .type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE,
918 struct mlx5_flow_act flow_act = {
919 .action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
920 .flags = FLOW_ACT_NO_APPEND,
922 struct mlx5_flow_spec *rule_spec;
923 struct mlx5_flow_handle *handle;
925 rule_spec = kvzalloc(sizeof(*rule_spec), GFP_KERNEL);
927 return ERR_PTR(-ENOMEM);
929 rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS | MLX5_MATCH_MISC_PARAMETERS_3;
931 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, outer_headers.ip_version);
932 MLX5_SET(fte_match_param, rule_spec->match_value, outer_headers.ip_version, 6);
933 MLX5_SET_TO_ONES(fte_match_param, rule_spec->match_criteria, misc_parameters_3.icmpv6_type);
934 MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_3.icmpv6_type, type);
936 handle = mlx5_add_flow_rules(ingress_ft, rule_spec, &flow_act, &dest, 1);
943 mlx5_esw_bridge_ingress_mcast_fhs_create(struct mlx5_esw_bridge_offloads *br_offloads)
945 struct mlx5_flow_handle *igmp_handle, *mld_query_handle, *mld_report_handle,
947 struct mlx5_flow_table *ingress_ft = br_offloads->ingress_ft,
948 *skip_ft = br_offloads->skip_ft;
951 igmp_handle = mlx5_esw_bridge_ingress_igmp_fh_create(ingress_ft, skip_ft);
952 if (IS_ERR(igmp_handle))
953 return PTR_ERR(igmp_handle);
955 if (br_offloads->ingress_mld_fg) {
956 mld_query_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_QUERY,
959 if (IS_ERR(mld_query_handle)) {
960 err = PTR_ERR(mld_query_handle);
964 mld_report_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REPORT,
967 if (IS_ERR(mld_report_handle)) {
968 err = PTR_ERR(mld_report_handle);
972 mld_done_handle = mlx5_esw_bridge_ingress_mld_fh_create(ICMPV6_MGM_REDUCTION,
975 if (IS_ERR(mld_done_handle)) {
976 err = PTR_ERR(mld_done_handle);
980 mld_query_handle = NULL;
981 mld_report_handle = NULL;
982 mld_done_handle = NULL;
985 br_offloads->igmp_handle = igmp_handle;
986 br_offloads->mld_query_handle = mld_query_handle;
987 br_offloads->mld_report_handle = mld_report_handle;
988 br_offloads->mld_done_handle = mld_done_handle;
993 mlx5_del_flow_rules(mld_report_handle);
995 mlx5_del_flow_rules(mld_query_handle);
997 mlx5_del_flow_rules(igmp_handle);
1002 mlx5_esw_bridge_ingress_mcast_fhs_cleanup(struct mlx5_esw_bridge_offloads *br_offloads)
1004 if (br_offloads->mld_done_handle)
1005 mlx5_del_flow_rules(br_offloads->mld_done_handle);
1006 br_offloads->mld_done_handle = NULL;
1007 if (br_offloads->mld_report_handle)
1008 mlx5_del_flow_rules(br_offloads->mld_report_handle);
1009 br_offloads->mld_report_handle = NULL;
1010 if (br_offloads->mld_query_handle)
1011 mlx5_del_flow_rules(br_offloads->mld_query_handle);
1012 br_offloads->mld_query_handle = NULL;
1013 if (br_offloads->igmp_handle)
1014 mlx5_del_flow_rules(br_offloads->igmp_handle);
1015 br_offloads->igmp_handle = NULL;
1018 static int mlx5_esw_brige_mcast_init(struct mlx5_esw_bridge *bridge)
1020 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1021 struct mlx5_esw_bridge_port *port, *failed;
1025 xa_for_each(&br_offloads->ports, i, port) {
1026 if (port->bridge != bridge)
1029 err = mlx5_esw_bridge_port_mcast_init(port);
1038 xa_for_each(&br_offloads->ports, i, port) {
1041 if (port->bridge != bridge)
1044 mlx5_esw_bridge_port_mcast_cleanup(port);
1049 static void mlx5_esw_brige_mcast_cleanup(struct mlx5_esw_bridge *bridge)
1051 struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads;
1052 struct mlx5_esw_bridge_port *port;
1055 xa_for_each(&br_offloads->ports, i, port) {
1056 if (port->bridge != bridge)
1059 mlx5_esw_bridge_port_mcast_cleanup(port);
1063 static int mlx5_esw_brige_mcast_global_enable(struct mlx5_esw_bridge_offloads *br_offloads)
1067 if (br_offloads->ingress_igmp_fg)
1068 return 0; /* already enabled by another bridge */
1070 err = mlx5_esw_bridge_ingress_mcast_fgs_init(br_offloads);
1072 esw_warn(br_offloads->esw->dev,
1073 "Failed to create global multicast flow groups (err=%d)\n",
1078 err = mlx5_esw_bridge_ingress_mcast_fhs_create(br_offloads);
1080 esw_warn(br_offloads->esw->dev,
1081 "Failed to create global multicast flows (err=%d)\n",
1089 mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
1093 static void mlx5_esw_brige_mcast_global_disable(struct mlx5_esw_bridge_offloads *br_offloads)
1095 struct mlx5_esw_bridge *br;
1097 list_for_each_entry(br, &br_offloads->bridges, list) {
1098 /* Ingress table is global, so only disable snooping when all
1099 * bridges on esw have multicast disabled.
1101 if (br->flags & MLX5_ESW_BRIDGE_MCAST_FLAG)
1105 mlx5_esw_bridge_ingress_mcast_fhs_cleanup(br_offloads);
1106 mlx5_esw_bridge_ingress_mcast_fgs_cleanup(br_offloads);
1109 int mlx5_esw_bridge_mcast_enable(struct mlx5_esw_bridge *bridge)
1113 err = mlx5_esw_brige_mcast_global_enable(bridge->br_offloads);
1117 bridge->flags |= MLX5_ESW_BRIDGE_MCAST_FLAG;
1119 err = mlx5_esw_brige_mcast_init(bridge);
1121 esw_warn(bridge->br_offloads->esw->dev, "Failed to enable multicast (err=%d)\n",
1123 bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
1124 mlx5_esw_brige_mcast_global_disable(bridge->br_offloads);
1129 void mlx5_esw_bridge_mcast_disable(struct mlx5_esw_bridge *bridge)
1131 mlx5_esw_brige_mcast_cleanup(bridge);
1132 bridge->flags &= ~MLX5_ESW_BRIDGE_MCAST_FLAG;
1133 mlx5_esw_brige_mcast_global_disable(bridge->br_offloads);