/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include "esw/acl/lgcy.h"
#include "mlx5_core.h"
#include "lib/eq.h"
#include "eswitch.h"
#include "fs_core.h"
#include "devlink.h"
#include "ecpf.h"
#include "en/mod_hdr.h"

enum {
	MLX5_ACTION_NONE = 0,
	MLX5_ACTION_ADD  = 1,
	MLX5_ACTION_DEL  = 2,
};

/* Vport UC/MC hash node */
struct vport_addr {
	struct l2addr_node     node;
	u8                     action;
	u16                    vport;
	struct mlx5_flow_handle *flow_rule;
	bool mpfs; /* UC MAC was added to MPFs */
	/* A flag indicating that mac was added due to mc promiscuous vport */
	bool mc_promisc;
};

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw);
static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw);

static int mlx5_eswitch_check(const struct mlx5_core_dev *dev)
{
	if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;

	if (!MLX5_ESWITCH_MANAGER(dev))
		return -EOPNOTSUPP;

	return 0;
}

struct mlx5_eswitch *mlx5_devlink_eswitch_get(struct devlink *devlink)
{
	struct mlx5_core_dev *dev = devlink_priv(devlink);
	int err;

	err = mlx5_eswitch_check(dev);
	if (err)
		return ERR_PTR(err);

	return dev->priv.eswitch;
}

struct mlx5_vport *__must_check
mlx5_eswitch_get_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	u16 idx;

	if (!esw || !MLX5_CAP_GEN(esw->dev, vport_group_manager))
		return ERR_PTR(-EPERM);

	idx = mlx5_eswitch_vport_num_to_index(esw, vport_num);

	if (idx > esw->total_vports - 1) {
		esw_debug(esw->dev, "vport out of range: num(0x%x), idx(0x%x)\n",
			  vport_num, idx);
		return ERR_PTR(-EINVAL);
	}

	return &esw->vports[idx];
}

static int arm_vport_context_events_cmd(struct mlx5_core_dev *dev, u16 vport,
					u32 events_mask)
{
	u32 in[MLX5_ST_SZ_DW(modify_nic_vport_context_in)] = {};
	void *nic_vport_ctx;

	MLX5_SET(modify_nic_vport_context_in, in,
		 opcode, MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT);
	MLX5_SET(modify_nic_vport_context_in, in, field_select.change_event, 1);
	MLX5_SET(modify_nic_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_nic_vport_context_in, in, other_vport, 1);
	nic_vport_ctx = MLX5_ADDR_OF(modify_nic_vport_context_in,
				     in, nic_vport_context);

	MLX5_SET(nic_vport_context, nic_vport_ctx, arm_change_event, 1);

	if (events_mask & MLX5_VPORT_UC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_uc_address_change, 1);
	if (events_mask & MLX5_VPORT_MC_ADDR_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_mc_address_change, 1);
	if (events_mask & MLX5_VPORT_PROMISC_CHANGE)
		MLX5_SET(nic_vport_context, nic_vport_ctx,
			 event_on_promisc_change, 1);

	return mlx5_cmd_exec_in(dev, modify_nic_vport_context, in);
}

/* E-Switch vport context HW commands */
int mlx5_eswitch_modify_esw_vport_context(struct mlx5_core_dev *dev, u16 vport,
					  bool other_vport, void *in)
{
	MLX5_SET(modify_esw_vport_context_in, in, opcode,
		 MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT);
	MLX5_SET(modify_esw_vport_context_in, in, vport_number, vport);
	MLX5_SET(modify_esw_vport_context_in, in, other_vport, other_vport);
	return mlx5_cmd_exec_in(dev, modify_esw_vport_context, in);
}

static int modify_esw_vport_cvlan(struct mlx5_core_dev *dev, u16 vport,
				  u16 vlan, u8 qos, u8 set_flags)
{
	u32 in[MLX5_ST_SZ_DW(modify_esw_vport_context_in)] = {};

	if (!MLX5_CAP_ESW(dev, vport_cvlan_strip) ||
	    !MLX5_CAP_ESW(dev, vport_cvlan_insert_if_not_exist))
		return -EOPNOTSUPP;

	esw_debug(dev, "Set Vport[%d] VLAN %d qos %d set=%x\n",
		  vport, vlan, qos, set_flags);

	if (set_flags & SET_VLAN_STRIP)
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_strip, 1);

	if (set_flags & SET_VLAN_INSERT) {
		/* insert only if no vlan in packet */
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.vport_cvlan_insert, 1);

		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_pcp, qos);
		MLX5_SET(modify_esw_vport_context_in, in,
			 esw_vport_context.cvlan_id, vlan);
	}

	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_strip, 1);
	MLX5_SET(modify_esw_vport_context_in, in,
		 field_select.vport_cvlan_insert, 1);

	return mlx5_eswitch_modify_esw_vport_context(dev, vport, true, in);
}

/* E-Switch FDB */
static struct mlx5_flow_handle *
__esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u16 vport, bool rx_rule,
			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
{
	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
			    MLX5_MATCH_OUTER_HEADERS);
	struct mlx5_flow_handle *flow_rule = NULL;
	struct mlx5_flow_act flow_act = {0};
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_spec *spec;
	void *mv_misc = NULL;
	void *mc_misc = NULL;
	u8 *dmac_v = NULL;
	u8 *dmac_c = NULL;

	if (rx_rule)
		match_header |= MLX5_MATCH_MISC_PARAMETERS;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	dmac_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			      outer_headers.dmac_47_16);
	dmac_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			      outer_headers.dmac_47_16);

	if (match_header & MLX5_MATCH_OUTER_HEADERS) {
		ether_addr_copy(dmac_v, mac_v);
		ether_addr_copy(dmac_c, mac_c);
	}

	if (match_header & MLX5_MATCH_MISC_PARAMETERS) {
		mv_misc = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       misc_parameters);
		mc_misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       misc_parameters);
		MLX5_SET(fte_match_set_misc, mv_misc, source_port, MLX5_VPORT_UPLINK);
		MLX5_SET_TO_ONES(fte_match_set_misc, mc_misc, source_port);
	}

	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = vport;

	esw_debug(esw->dev,
		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
		  dmac_v, dmac_c, vport);
	spec->match_criteria_enable = match_header;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule =
		mlx5_add_flow_rules(esw->fdb_table.legacy.fdb, spec,
				    &flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		esw_warn(esw->dev,
			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
			 dmac_v, dmac_c, vport, PTR_ERR(flow_rule));
		flow_rule = NULL;
	}

	kvfree(spec);
	return flow_rule;
}

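/* Three flavors of legacy FDB rules are built on top of
 * __esw_fdb_set_vport_rule() below:
 * - unicast:  exact DMAC match (all-ones mask) -> vport
 * - allmulti: match only the multicast bit (01:00:00:00:00:00) -> vport
 * - promisc:  rx_rule matching traffic arriving from the uplink -> vport
 */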
static struct mlx5_flow_handle *
esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u16 vport)
{
	u8 mac_c[ETH_ALEN];

	eth_broadcast_addr(mac_c);
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	mac_c[0] = 0x01;
	mac_v[0] = 0x01;
	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
}

static struct mlx5_flow_handle *
esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u16 vport)
{
	u8 mac_c[ETH_ALEN];
	u8 mac_v[ETH_ALEN];

	eth_zero_addr(mac_c);
	eth_zero_addr(mac_v);
	return __esw_fdb_set_vport_rule(esw, vport, true, mac_c, mac_v);
}

enum {
	LEGACY_VEPA_PRIO = 0,
	LEGACY_FDB_PRIO,
};

static int esw_create_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	int err;

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	/* num FTE 2, num FG 2 */
	ft_attr.prio = LEGACY_VEPA_PRIO;
	ft_attr.max_fte = 2;
	ft_attr.autogroup.max_num_groups = 2;
	fdb = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create VEPA FDB err %d\n", err);
		return err;
	}
	esw->fdb_table.legacy.vepa_fdb = fdb;

	return 0;
}

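/* The legacy FDB below is laid out as three flow groups:
 * entries [0 .. size-3] hold exact-match unicast/multicast DMACs, entry
 * [size-2] holds the single allmulti rule and entry [size-1] the single
 * promisc rule matched by source port. Exact matches therefore take
 * precedence over the catch-all rules at the end of the table.
 */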
static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_core_dev *dev = esw->dev;
	struct mlx5_flow_namespace *root_ns;
	struct mlx5_flow_table *fdb;
	struct mlx5_flow_group *g;
	void *match_criteria;
	int table_size;
	u32 *flow_group_in;
	u8 *dmac;
	int err = 0;

	esw_debug(dev, "Create FDB log_max_size(%d)\n",
		  MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));

	root_ns = mlx5_get_fdb_sub_ns(dev, 0);
	if (!root_ns) {
		esw_warn(dev, "Failed to get FDB flow namespace\n");
		return -EOPNOTSUPP;
	}

	flow_group_in = kvzalloc(inlen, GFP_KERNEL);
	if (!flow_group_in)
		return -ENOMEM;

	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
	ft_attr.max_fte = table_size;
	ft_attr.prio = LEGACY_FDB_PRIO;
	fdb = mlx5_create_flow_table(root_ns, &ft_attr);
	if (IS_ERR(fdb)) {
		err = PTR_ERR(fdb);
		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
		goto out;
	}
	esw->fdb_table.legacy.fdb = fdb;

	/* Addresses group : Full match unicast/multicast addresses */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
	dmac = MLX5_ADDR_OF(fte_match_param, match_criteria, outer_headers.dmac_47_16);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
	/* Preserve 2 entries for allmulti and promisc rules */
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 3);
	eth_broadcast_addr(dmac);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.addr_grp = g;

	/* Allmulti group : One rule that forwards any mcast traffic */
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 2);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 2);
	eth_zero_addr(dmac);
	dmac[0] = 0x01;
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create allmulti flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.allmulti_grp = g;

	/* Promiscuous group :
	 * One rule that forwards all unmatched traffic from previous groups
	 */
	eth_zero_addr(dmac);
	MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable,
		 MLX5_MATCH_MISC_PARAMETERS);
	MLX5_SET_TO_ONES(fte_match_param, match_criteria, misc_parameters.source_port);
	MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, table_size - 1);
	MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, table_size - 1);
	g = mlx5_create_flow_group(fdb, flow_group_in);
	if (IS_ERR(g)) {
		err = PTR_ERR(g);
		esw_warn(dev, "Failed to create promisc flow group err(%d)\n", err);
		goto out;
	}
	esw->fdb_table.legacy.promisc_grp = g;

out:
	if (err)
		esw_destroy_legacy_fdb_table(esw);

	kvfree(flow_group_in);
	return err;
}

static void esw_destroy_legacy_vepa_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy VEPA Table\n");
	if (!esw->fdb_table.legacy.vepa_fdb)
		return;

	mlx5_destroy_flow_table(esw->fdb_table.legacy.vepa_fdb);
	esw->fdb_table.legacy.vepa_fdb = NULL;
}

static void esw_destroy_legacy_fdb_table(struct mlx5_eswitch *esw)
{
	esw_debug(esw->dev, "Destroy FDB Table\n");
	if (!esw->fdb_table.legacy.fdb)
		return;

	if (esw->fdb_table.legacy.promisc_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.promisc_grp);
	if (esw->fdb_table.legacy.allmulti_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.allmulti_grp);
	if (esw->fdb_table.legacy.addr_grp)
		mlx5_destroy_flow_group(esw->fdb_table.legacy.addr_grp);
	mlx5_destroy_flow_table(esw->fdb_table.legacy.fdb);

	esw->fdb_table.legacy.fdb = NULL;
	esw->fdb_table.legacy.addr_grp = NULL;
	esw->fdb_table.legacy.allmulti_grp = NULL;
	esw->fdb_table.legacy.promisc_grp = NULL;
}

static int esw_create_legacy_table(struct mlx5_eswitch *esw)
{
	int err;

	memset(&esw->fdb_table.legacy, 0, sizeof(struct legacy_fdb));

	err = esw_create_legacy_vepa_table(esw);
	if (err)
		return err;

	err = esw_create_legacy_fdb_table(esw);
	if (err)
		esw_destroy_legacy_vepa_table(esw);

	return err;
}

static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
{
	esw_cleanup_vepa_rules(esw);
	esw_destroy_legacy_fdb_table(esw);
	esw_destroy_legacy_vepa_table(esw);
}

#define MLX5_LEGACY_SRIOV_VPORT_EVENTS (MLX5_VPORT_UC_ADDR_CHANGE | \
					MLX5_VPORT_MC_ADDR_CHANGE | \
					MLX5_VPORT_PROMISC_CHANGE)

static int esw_legacy_enable(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int ret, i;

	ret = esw_create_legacy_table(esw);
	if (ret)
		return ret;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;

	ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
	if (ret)
		esw_destroy_legacy_table(esw);
	return ret;
}

static void esw_legacy_disable(struct mlx5_eswitch *esw)
{
	struct esw_mc_addr *mc_promisc;

	mlx5_eswitch_disable_pf_vf_vports(esw);

	mc_promisc = &esw->mc_promisc;
	if (mc_promisc->uplink_rule)
		mlx5_del_flow_rules(mc_promisc->uplink_rule);

	esw_destroy_legacy_table(esw);
}

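/* Legacy mode keeps the FDB in sync with each vport's NIC vport context:
 * enabling a vport arms UC/MC/promisc change events
 * (MLX5_LEGACY_SRIOV_VPORT_EVENTS) and the change handler further below
 * replays the vport's address lists and rx mode into the FDB.
 */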
/* E-Switch vport UC/MC lists management */
typedef int (*vport_addr_action)(struct mlx5_eswitch *esw,
				 struct vport_addr *vaddr);

static int esw_add_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err;

	/* Skip mlx5_mpfs_add_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_add;

	err = mlx5_mpfs_add_mac(esw->dev, mac);
	if (err) {
		esw_warn(esw->dev,
			 "Failed to add L2 table mac(%pM) for vport(0x%x), err(%d)\n",
			 mac, vport, err);
		return err;
	}
	vaddr->mpfs = true;

fdb_add:
	/* SRIOV is enabled: Forward UC MAC to vport */
	if (esw->fdb_table.legacy.fdb && esw->mode == MLX5_ESWITCH_LEGACY)
		vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);

	esw_debug(esw->dev, "\tADDED UC MAC: vport[%d] %pM fr(%p)\n",
		  vport, mac, vaddr->flow_rule);

	return 0;
}

static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;
	int err = 0;

	/* Skip mlx5_mpfs_del_mac for eswitch managers,
	 * it is already done by its netdev in mlx5e_execute_l2_action
	 */
	if (!vaddr->mpfs || mlx5_esw_is_manager_vport(esw, vport))
		goto fdb_del;

	err = mlx5_mpfs_del_mac(esw->dev, mac);
	if (err)
		esw_warn(esw->dev,
			 "Failed to del L2 table mac(%pM) for vport(%d), err(%d)\n",
			 mac, vport, err);
	vaddr->mpfs = false;

fdb_del:
	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	return 0;
}

static void update_allmulti_vports(struct mlx5_eswitch *esw,
				   struct vport_addr *vaddr,
				   struct esw_mc_addr *esw_mc)
{
	u8 *mac = vaddr->node.addr;
	struct mlx5_vport *vport;
	u16 i, vport_num;

	mlx5_esw_for_all_vports(esw, i, vport) {
		struct hlist_head *vport_hash = vport->mc_list;
		struct vport_addr *iter_vaddr =
					l2addr_hash_find(vport_hash,
							 mac,
							 struct vport_addr);
		vport_num = vport->vport;
		if (IS_ERR_OR_NULL(vport->allmulti_rule) ||
		    vaddr->vport == vport_num)
			continue;
		switch (vaddr->action) {
		case MLX5_ACTION_ADD:
			if (iter_vaddr)
				continue;
			iter_vaddr = l2addr_hash_add(vport_hash, mac,
						     struct vport_addr,
						     GFP_KERNEL);
			if (!iter_vaddr) {
				esw_warn(esw->dev,
					 "ALL-MULTI: Failed to add MAC(%pM) to vport[%d] DB\n",
					 mac, vport_num);
				continue;
			}
			iter_vaddr->vport = vport_num;
			iter_vaddr->flow_rule =
					esw_fdb_set_vport_rule(esw,
							       mac,
							       vport_num);
			iter_vaddr->mc_promisc = true;
			break;
		case MLX5_ACTION_DEL:
			if (!iter_vaddr)
				continue;
			mlx5_del_flow_rules(iter_vaddr->flow_rule);
			l2addr_hash_del(iter_vaddr);
			break;
		}
	}
}

static int esw_add_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (esw_mc)
		goto add;

	esw_mc = l2addr_hash_add(hash, mac, struct esw_mc_addr, GFP_KERNEL);
	if (!esw_mc)
		return -ENOMEM;

	esw_mc->uplink_rule = /* Forward MC MAC to Uplink */
		esw_fdb_set_vport_rule(esw, mac, MLX5_VPORT_UPLINK);

	/* Add this multicast mac to all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

add:
	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't increment the multicast ref count
	 */
	if (!vaddr->mc_promisc)
		esw_mc->refcnt++;

	/* Forward MC MAC to vport */
	vaddr->flow_rule = esw_fdb_set_vport_rule(esw, mac, vport);
	esw_debug(esw->dev,
		  "\tADDED MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule,
		  esw_mc->refcnt, esw_mc->uplink_rule);
	return 0;
}

static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
{
	struct hlist_head *hash = esw->mc_table;
	struct esw_mc_addr *esw_mc;
	u8 *mac = vaddr->node.addr;
	u16 vport = vaddr->vport;

	if (!esw->fdb_table.legacy.fdb)
		return 0;

	esw_mc = l2addr_hash_find(hash, mac, struct esw_mc_addr);
	if (!esw_mc) {
		esw_warn(esw->dev,
			 "Failed to find eswitch MC addr for MAC(%pM) vport(%d)",
			 mac, vport);
		return -EINVAL;
	}
	esw_debug(esw->dev,
		  "\tDELETE MC MAC: vport[%d] %pM fr(%p) refcnt(%d) uplinkfr(%p)\n",
		  vport, mac, vaddr->flow_rule, esw_mc->refcnt,
		  esw_mc->uplink_rule);

	if (vaddr->flow_rule)
		mlx5_del_flow_rules(vaddr->flow_rule);
	vaddr->flow_rule = NULL;

	/* If the multicast mac is added as a result of mc promiscuous vport,
	 * don't decrement the multicast ref count.
	 */
	if (vaddr->mc_promisc || (--esw_mc->refcnt > 0))
		return 0;

	/* Remove this multicast mac from all the mc promiscuous vports */
	update_allmulti_vports(esw, vaddr, esw_mc);

	if (esw_mc->uplink_rule)
		mlx5_del_flow_rules(esw_mc->uplink_rule);

	l2addr_hash_del(esw_mc);
	return 0;
}

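/* The UC/MC lists are reconciled with a small action state machine:
 * esw_update_vport_addr_list() first marks every cached entry
 * MLX5_ACTION_DEL, then re-marks entries still present in FW as
 * MLX5_ACTION_NONE and new ones as MLX5_ACTION_ADD;
 * esw_apply_vport_addr_list() then pushes that diff to the L2 table
 * and the FDB.
 */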
/* Apply vport UC/MC list to HW l2 table and FDB table */
static void esw_apply_vport_addr_list(struct mlx5_eswitch *esw,
				      struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	vport_addr_action vport_addr_add;
	vport_addr_action vport_addr_del;
	struct vport_addr *addr;
	struct l2addr_node *node;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	vport_addr_add = is_uc ? esw_add_uc_addr :
				 esw_add_mc_addr;
	vport_addr_del = is_uc ? esw_del_uc_addr :
				 esw_del_mc_addr;

	hash = is_uc ? vport->uc_list : vport->mc_list;
	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		switch (addr->action) {
		case MLX5_ACTION_ADD:
			vport_addr_add(esw, addr);
			addr->action = MLX5_ACTION_NONE;
			break;
		case MLX5_ACTION_DEL:
			vport_addr_del(esw, addr);
			l2addr_hash_del(addr);
			break;
		}
	}
}

/* Sync vport UC/MC list from vport context */
static void esw_update_vport_addr_list(struct mlx5_eswitch *esw,
				       struct mlx5_vport *vport, int list_type)
{
	bool is_uc = list_type == MLX5_NVPRT_LIST_TYPE_UC;
	u8 (*mac_list)[ETH_ALEN];
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int size;
	int err;
	int hi;
	int i;

	size = is_uc ? MLX5_MAX_UC_PER_VPORT(esw->dev) :
		       MLX5_MAX_MC_PER_VPORT(esw->dev);

	mac_list = kcalloc(size, ETH_ALEN, GFP_KERNEL);
	if (!mac_list)
		return;

	hash = is_uc ? vport->uc_list : vport->mc_list;

	for_each_l2hash_node(node, tmp, hash, hi) {
		addr = container_of(node, struct vport_addr, node);
		addr->action = MLX5_ACTION_DEL;
	}

	if (!vport->enabled)
		goto out;

	err = mlx5_query_nic_vport_mac_list(esw->dev, vport->vport, list_type,
					    mac_list, &size);
	if (err)
		goto out;
	esw_debug(esw->dev, "vport[%d] context update %s list size (%d)\n",
		  vport->vport, is_uc ? "UC" : "MC", size);

	for (i = 0; i < size; i++) {
		if (is_uc && !is_valid_ether_addr(mac_list[i]))
			continue;

		if (!is_uc && !is_multicast_ether_addr(mac_list[i]))
			continue;

		addr = l2addr_hash_find(hash, mac_list[i], struct vport_addr);
		if (addr) {
			addr->action = MLX5_ACTION_NONE;
			/* If this mac was previously added because of allmulti
			 * promiscuous rx mode, it's now converted to be the
			 * original vport mac.
			 */
			if (addr->mc_promisc) {
				struct esw_mc_addr *esw_mc =
					l2addr_hash_find(esw->mc_table,
							 mac_list[i],
							 struct esw_mc_addr);
				if (!esw_mc) {
					esw_warn(esw->dev,
						 "Failed to find MAC(%pM) in mcast DB\n",
						 mac_list[i]);
					continue;
				}
				esw_mc->refcnt++;
				addr->mc_promisc = false;
			}
			continue;
		}

		addr = l2addr_hash_add(hash, mac_list[i], struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add MAC(%pM) to vport[%d] DB\n",
				 mac_list[i], vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
	}
out:
	kfree(mac_list);
}

/* Sync vport MC promiscuous state from the eswitch MC table.
 * Must be called after esw_update_vport_addr_list
 */
static void esw_update_vport_mc_promisc(struct mlx5_eswitch *esw,
					struct mlx5_vport *vport)
{
	struct l2addr_node *node;
	struct vport_addr *addr;
	struct hlist_head *hash;
	struct hlist_node *tmp;
	int hi;

	hash = vport->mc_list;

	for_each_l2hash_node(node, tmp, esw->mc_table, hi) {
		u8 *mac = node->addr;

		addr = l2addr_hash_find(hash, mac, struct vport_addr);
		if (addr) {
			if (addr->action == MLX5_ACTION_DEL)
				addr->action = MLX5_ACTION_NONE;
			continue;
		}
		addr = l2addr_hash_add(hash, mac, struct vport_addr,
				       GFP_KERNEL);
		if (!addr) {
			esw_warn(esw->dev,
				 "Failed to add allmulti MAC(%pM) to vport[%d] DB\n",
				 mac, vport->vport);
			continue;
		}
		addr->vport = vport->vport;
		addr->action = MLX5_ACTION_ADD;
		addr->mc_promisc = true;
	}
}

/* Apply vport rx mode to HW FDB table */
static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw,
				    struct mlx5_vport *vport,
				    bool promisc, bool mc_promisc)
{
	struct esw_mc_addr *allmulti_addr = &esw->mc_promisc;

	if (IS_ERR_OR_NULL(vport->allmulti_rule) != mc_promisc)
		goto promisc;

	if (mc_promisc) {
		vport->allmulti_rule =
			esw_fdb_set_vport_allmulti_rule(esw, vport->vport);
		if (!allmulti_addr->uplink_rule)
			allmulti_addr->uplink_rule =
				esw_fdb_set_vport_allmulti_rule(esw,
								MLX5_VPORT_UPLINK);
		allmulti_addr->refcnt++;
	} else if (vport->allmulti_rule) {
		mlx5_del_flow_rules(vport->allmulti_rule);
		vport->allmulti_rule = NULL;

		if (--allmulti_addr->refcnt > 0)
			goto promisc;

		if (allmulti_addr->uplink_rule)
			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
		allmulti_addr->uplink_rule = NULL;
	}

promisc:
	if (IS_ERR_OR_NULL(vport->promisc_rule) != promisc)
		return;

	if (promisc) {
		vport->promisc_rule =
			esw_fdb_set_vport_promisc_rule(esw, vport->vport);
	} else if (vport->promisc_rule) {
		mlx5_del_flow_rules(vport->promisc_rule);
		vport->promisc_rule = NULL;
	}
}

/* Sync vport rx mode from vport context */
static void esw_update_vport_rx_mode(struct mlx5_eswitch *esw,
				     struct mlx5_vport *vport)
{
	int promisc_all = 0;
	int promisc_uc = 0;
	int promisc_mc = 0;
	int err;

	err = mlx5_query_nic_vport_promisc(esw->dev,
					   vport->vport,
					   &promisc_uc,
					   &promisc_mc,
					   &promisc_all);
	if (err)
		return;
	esw_debug(esw->dev, "vport[%d] context update rx mode promisc_all=%d, all_multi=%d\n",
		  vport->vport, promisc_all, promisc_mc);

	if (!vport->info.trusted || !vport->enabled) {
		promisc_uc = 0;
		promisc_mc = 0;
		promisc_all = 0;
	}

	esw_apply_vport_rx_mode(esw, vport, promisc_all,
				(promisc_all || promisc_mc));
}

static void esw_vport_change_handle_locked(struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = vport->dev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(dev, vport->vport, true, mac);
	esw_debug(dev, "vport[%d] Context Changed: perm mac: %pM\n",
		  vport->vport, mac);

	if (vport->enabled_events & MLX5_VPORT_UC_ADDR_CHANGE) {
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_UC);
	}

	if (vport->enabled_events & MLX5_VPORT_MC_ADDR_CHANGE)
		esw_update_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	if (vport->enabled_events & MLX5_VPORT_PROMISC_CHANGE) {
		esw_update_vport_rx_mode(esw, vport);
		if (!IS_ERR_OR_NULL(vport->allmulti_rule))
			esw_update_vport_mc_promisc(esw, vport);
	}

	if (vport->enabled_events & (MLX5_VPORT_PROMISC_CHANGE | MLX5_VPORT_MC_ADDR_CHANGE))
		esw_apply_vport_addr_list(esw, vport, MLX5_NVPRT_LIST_TYPE_MC);

	esw_debug(esw->dev, "vport[%d] Context Changed: Done\n", vport->vport);
	if (vport->enabled)
		arm_vport_context_events_cmd(dev, vport->vport,
					     vport->enabled_events);
}

static void esw_vport_change_handler(struct work_struct *work)
{
	struct mlx5_vport *vport =
		container_of(work, struct mlx5_vport, vport_change_handler);
	struct mlx5_eswitch *esw = vport->dev->priv.eswitch;

	mutex_lock(&esw->state_lock);
	esw_vport_change_handle_locked(vport);
	mutex_unlock(&esw->state_lock);
}

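/* Checks whether the scheduling element type is advertised in the QoS
 * esw_element_type capability. Note: ELEMENT_TYPE_CAP_MASK_TASR below
 * matches the capability bit name as defined in mlx5_ifc.h (including the
 * TASR spelling); it is not a typo local to this file.
 */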
static bool element_type_supported(struct mlx5_eswitch *esw, int type)
{
	const struct mlx5_core_dev *dev = esw->dev;

	switch (type) {
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_TASR;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_VPORT_TC;
	case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC:
		return MLX5_CAP_QOS(dev, esw_element_type) &
		       ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC;
	}
	return false;
}

/* Vport QoS management */
static void esw_create_tsar(struct mlx5_eswitch *esw)
{
	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	__be32 *attr;
	int err;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return;

	if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR))
		return;

	if (esw->qos.enabled)
		return;

	MLX5_SET(scheduling_context, tsar_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR);

	attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes);
	*attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 tsar_ctx,
						 &esw->qos.root_tsar_id);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
		return;
	}

	esw->qos.enabled = true;
}

static void esw_destroy_tsar(struct mlx5_eswitch *esw)
{
	int err;

	if (!esw->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  esw->qos.root_tsar_id);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);

	esw->qos.enabled = false;
}

static int esw_vport_enable_qos(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 initial_max_rate, u32 initial_bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	int err = 0;

	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
	    !MLX5_CAP_QOS(dev, esw_scheduling))
		return 0;

	if (vport->qos.enabled)
		return -EEXIST;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 initial_max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share);

	err = mlx5_create_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 &vport->qos.esw_tsar_ix);
	if (err) {
		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	vport->qos.enabled = true;
	return 0;
}

static void esw_vport_disable_qos(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	int err;

	if (!vport->qos.enabled)
		return;

	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  vport->qos.esw_tsar_ix);
	if (err)
		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);

	vport->qos.enabled = false;
}

static int esw_vport_qos_config(struct mlx5_eswitch *esw,
				struct mlx5_vport *vport,
				u32 max_rate, u32 bw_share)
{
	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
	struct mlx5_core_dev *dev = esw->dev;
	void *vport_elem;
	u32 bitmask = 0;
	int err = 0;

	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
		return -EOPNOTSUPP;

	if (!vport->qos.enabled)
		return -EIO;

	MLX5_SET(scheduling_context, sched_ctx, element_type,
		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
	vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx,
				  element_attributes);
	MLX5_SET(vport_element, vport_elem, vport_number, vport->vport);
	MLX5_SET(scheduling_context, sched_ctx, parent_element_id,
		 esw->qos.root_tsar_id);
	MLX5_SET(scheduling_context, sched_ctx, max_average_bw,
		 max_rate);
	MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share);
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;

	err = mlx5_modify_scheduling_element_cmd(dev,
						 SCHEDULING_HIERARCHY_E_SWITCH,
						 sched_ctx,
						 vport->qos.esw_tsar_ix,
						 bitmask);
	if (err) {
		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
			 vport->vport, err);
		return err;
	}

	return 0;
}

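/* Unlike esw_vport_qos_config() above, the helper below does not touch
 * bw_share; it only rewrites max_average_bw (note the single bit in the
 * modify bitmask). Callers are expected to invoke it for a vport whose
 * scheduling element already exists.
 */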
int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num,
			       u32 rate_mbps)
{
	u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {};
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps);

	return mlx5_modify_scheduling_element_cmd(esw->dev,
						  SCHEDULING_HIERARCHY_E_SWITCH,
						  ctx,
						  vport->qos.esw_tsar_ix,
						  MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW);
}

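/* Derives an IB node GUID from a MAC address using the familiar
 * EUI-48 -> EUI-64 expansion: the 0xff,0xfe pair is inserted between the
 * OUI and the device-specific bytes. Bytes are written in reverse order
 * because the u64 is consumed little-endian.
 */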
static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac)
{
	((u8 *)node_guid)[7] = mac[0];
	((u8 *)node_guid)[6] = mac[1];
	((u8 *)node_guid)[5] = mac[2];
	((u8 *)node_guid)[4] = 0xff;
	((u8 *)node_guid)[3] = 0xfe;
	((u8 *)node_guid)[2] = mac[3];
	((u8 *)node_guid)[1] = mac[4];
	((u8 *)node_guid)[0] = mac[5];
}

static int esw_vport_create_legacy_acl_tables(struct mlx5_eswitch *esw,
					      struct mlx5_vport *vport)
{
	int ret;

	/* Only non manager vports need ACL in legacy mode */
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return 0;

	ret = esw_acl_ingress_lgcy_setup(esw, vport);
	if (ret)
		goto ingress_err;

	ret = esw_acl_egress_lgcy_setup(esw, vport);
	if (ret)
		goto egress_err;

	return 0;

egress_err:
	esw_acl_ingress_lgcy_cleanup(esw, vport);
ingress_err:
	return ret;
}

static int esw_vport_setup_acl(struct mlx5_eswitch *esw,
			       struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		return esw_vport_create_legacy_acl_tables(esw, vport);
	else
		return esw_vport_create_offloads_acl_tables(esw, vport);
}

static void esw_vport_destroy_legacy_acl_tables(struct mlx5_eswitch *esw,
						struct mlx5_vport *vport)
{
	if (mlx5_esw_is_manager_vport(esw, vport->vport))
		return;

	esw_acl_egress_lgcy_cleanup(esw, vport);
	esw_acl_ingress_lgcy_cleanup(esw, vport);
}

static void esw_vport_cleanup_acl(struct mlx5_eswitch *esw,
				  struct mlx5_vport *vport)
{
	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_vport_destroy_legacy_acl_tables(esw, vport);
	else
		esw_vport_destroy_offloads_acl_tables(esw, vport);
}

static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;
	int flags;
	int err;

	err = esw_vport_setup_acl(esw, vport);
	if (err)
		return err;

	/* Attach vport to the eswitch rate limiter */
	esw_vport_enable_qos(esw, vport, vport->info.max_rate, vport->qos.bw_share);

	if (mlx5_esw_is_manager_vport(esw, vport_num))
		return 0;

	mlx5_modify_vport_admin_state(esw->dev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      vport_num, 1,
				      vport->info.link_state);

	/* Host PF has its own mac/guid. */
	if (vport_num) {
		mlx5_modify_nic_vport_mac_address(esw->dev, vport_num,
						  vport->info.mac);
		mlx5_modify_nic_vport_node_guid(esw->dev, vport_num,
						vport->info.node_guid);
	}

	flags = (vport->info.vlan || vport->info.qos) ?
		SET_VLAN_STRIP | SET_VLAN_INSERT : 0;
	modify_esw_vport_cvlan(esw->dev, vport_num, vport->info.vlan,
			       vport->info.qos, flags);

	return 0;
}

/* Don't cleanup vport->info, it's needed to restore vport configuration */
static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	u16 vport_num = vport->vport;

	if (!mlx5_esw_is_manager_vport(esw, vport_num))
		mlx5_modify_vport_admin_state(esw->dev,
					      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					      vport_num, 1,
					      MLX5_VPORT_ADMIN_STATE_DOWN);

	esw_vport_disable_qos(esw, vport);
	esw_vport_cleanup_acl(esw, vport);
}

static int esw_enable_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	struct mlx5_vport *vport;
	int ret;

	vport = mlx5_eswitch_get_vport(esw, vport_num);

	mutex_lock(&esw->state_lock);
	WARN_ON(vport->enabled);

	esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);

	ret = esw_vport_setup(esw, vport);
	if (ret)
		goto done;

	/* Sync with current vport context */
	vport->enabled_events = enabled_events;
	vport->enabled = true;

	/* Esw manager is trusted by default. Host PF (vport 0) is trusted as well
	 * in smartNIC as it's a vport group manager.
	 */
	if (mlx5_esw_is_manager_vport(esw, vport_num) ||
	    (!vport_num && mlx5_core_is_ecpf(esw->dev)))
		vport->info.trusted = true;

	esw_vport_change_handle_locked(vport);

	esw->enabled_vports++;
	esw_debug(esw->dev, "Enabled VPORT(%d)\n", vport_num);
done:
	mutex_unlock(&esw->state_lock);
	return ret;
}

static void esw_disable_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto done;

	esw_debug(esw->dev, "Disabling vport(%d)\n", vport_num);
	/* Mark this vport as disabled to discard new events */
	vport->enabled = false;

	/* Disable events from this vport */
	arm_vport_context_events_cmd(esw->dev, vport->vport, 0);
	/* We don't assume VFs will cleanup after themselves.
	 * Calling vport change handler while vport is disabled will cleanup
	 * the vport resources.
	 */
	esw_vport_change_handle_locked(vport);
	vport->enabled_events = 0;
	esw_vport_cleanup(esw, vport);
	esw->enabled_vports--;

done:
	mutex_unlock(&esw->state_lock);
}

static int eswitch_vport_event(struct notifier_block *nb,
			       unsigned long type, void *data)
{
	struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb);
	struct mlx5_eqe *eqe = data;
	struct mlx5_vport *vport;
	u16 vport_num;

	vport_num = be16_to_cpu(eqe->data.vport_change.vport_num);
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (!IS_ERR(vport))
		queue_work(esw->work_queue, &vport->vport_change_handler);

	return NOTIFY_OK;
}

/**
 * mlx5_esw_query_functions - Returns raw output about functions state
 * @dev:	Pointer to device to query
 *
 * mlx5_esw_query_functions() allocates and returns functions changed
 * raw output memory pointer from device on success. Otherwise returns ERR_PTR.
 * Caller must free the memory using kvfree() when valid pointer is returned.
 */
const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
	int outlen = MLX5_ST_SZ_BYTES(query_esw_functions_out);
	u32 in[MLX5_ST_SZ_DW(query_esw_functions_in)] = {};
	u32 *out;
	int err;

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return ERR_PTR(-ENOMEM);

	MLX5_SET(query_esw_functions_in, in, opcode,
		 MLX5_CMD_OP_QUERY_ESW_FUNCTIONS);

	err = mlx5_cmd_exec_inout(dev, query_esw_functions, in, out);
	if (!err)
		return out;

	kvfree(out);
	return ERR_PTR(err);
}

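/* Typical usage of mlx5_esw_query_functions() (illustrative sketch;
 * mlx5_eswitch_update_num_of_vfs() further below is a real in-file caller
 * of this pattern):
 *
 *	const u32 *out = mlx5_esw_query_functions(dev);
 *
 *	if (IS_ERR(out))
 *		return;
 *	num_vfs = MLX5_GET(query_esw_functions_out, out,
 *			   host_params_context.host_num_of_vfs);
 *	kvfree(out);
 */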
static void mlx5_eswitch_event_handlers_register(struct mlx5_eswitch *esw)
{
	MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE);
	mlx5_eq_notifier_register(esw->dev, &esw->nb);

	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev)) {
		MLX5_NB_INIT(&esw->esw_funcs.nb, mlx5_esw_funcs_changed_handler,
			     ESW_FUNCTIONS_CHANGED);
		mlx5_eq_notifier_register(esw->dev, &esw->esw_funcs.nb);
	}
}

static void mlx5_eswitch_event_handlers_unregister(struct mlx5_eswitch *esw)
{
	if (esw->mode == MLX5_ESWITCH_OFFLOADS && mlx5_eswitch_is_funcs_handler(esw->dev))
		mlx5_eq_notifier_unregister(esw->dev, &esw->esw_funcs.nb);

	mlx5_eq_notifier_unregister(esw->dev, &esw->nb);

	flush_workqueue(esw->work_queue);
}

static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw)
{
	struct mlx5_vport *vport;
	int i;

	mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) {
		memset(&vport->info, 0, sizeof(vport->info));
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
	}
}

/* Public E-Switch API */
#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))

int mlx5_eswitch_load_vport(struct mlx5_eswitch *esw, u16 vport_num,
			    enum mlx5_eswitch_vport_event enabled_events)
{
	int err;

	err = esw_enable_vport(esw, vport_num, enabled_events);
	if (err)
		return err;

	err = esw_offloads_load_rep(esw, vport_num);
	if (err)
		goto err_rep;

	return err;

err_rep:
	esw_disable_vport(esw, vport_num);
	return err;
}

void mlx5_eswitch_unload_vport(struct mlx5_eswitch *esw, u16 vport_num)
{
	esw_offloads_unload_rep(esw, vport_num);
	esw_disable_vport(esw, vport_num);
}

void mlx5_eswitch_unload_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs)
{
	int i;

	mlx5_esw_for_each_vf_vport_num_reverse(esw, i, num_vfs)
		mlx5_eswitch_unload_vport(esw, i);
}

int mlx5_eswitch_load_vf_vports(struct mlx5_eswitch *esw, u16 num_vfs,
				enum mlx5_eswitch_vport_event enabled_events)
{
	int err;
	int i;

	mlx5_esw_for_each_vf_vport_num(esw, i, num_vfs) {
		err = mlx5_eswitch_load_vport(esw, i, enabled_events);
		if (err)
			goto vf_err;
	}

	return 0;

vf_err:
	mlx5_eswitch_unload_vf_vports(esw, i - 1);
	return err;
}

/* mlx5_eswitch_enable_pf_vf_vports() enables vports of PF, ECPF and VFs,
 * whichever are present on the eswitch.
 */
int
mlx5_eswitch_enable_pf_vf_vports(struct mlx5_eswitch *esw,
				 enum mlx5_eswitch_vport_event enabled_events)
{
	int ret;

	/* Enable PF vport */
	ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_PF, enabled_events);
	if (ret)
		return ret;

	/* Enable ECPF vport */
	if (mlx5_ecpf_vport_exists(esw->dev)) {
		ret = mlx5_eswitch_load_vport(esw, MLX5_VPORT_ECPF, enabled_events);
		if (ret)
			goto ecpf_err;
	}

	/* Enable VF vports */
	ret = mlx5_eswitch_load_vf_vports(esw, esw->esw_funcs.num_vfs,
					  enabled_events);
	if (ret)
		goto vf_err;
	return 0;

vf_err:
	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

ecpf_err:
	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
	return ret;
}

/* mlx5_eswitch_disable_pf_vf_vports() disables vports of PF, ECPF and VFs,
 * whichever were previously enabled on the eswitch.
 */
void mlx5_eswitch_disable_pf_vf_vports(struct mlx5_eswitch *esw)
{
	mlx5_eswitch_unload_vf_vports(esw, esw->esw_funcs.num_vfs);

	if (mlx5_ecpf_vport_exists(esw->dev))
		mlx5_eswitch_unload_vport(esw, MLX5_VPORT_ECPF);

	mlx5_eswitch_unload_vport(esw, MLX5_VPORT_PF);
}

static void mlx5_eswitch_get_devlink_param(struct mlx5_eswitch *esw)
{
	struct devlink *devlink = priv_to_devlink(esw->dev);
	union devlink_param_value val;
	int err;

	err = devlink_param_driverinit_value_get(devlink,
						 MLX5_DEVLINK_PARAM_ID_ESW_LARGE_GROUP_NUM,
						 &val);
	if (!err) {
		esw->params.large_group_num = val.vu32;
	} else {
		esw_warn(esw->dev,
			 "Devlink can't get param fdb_large_groups, uses default (%d).\n",
			 ESW_OFFLOADS_DEFAULT_NUM_GROUPS);
		esw->params.large_group_num = ESW_OFFLOADS_DEFAULT_NUM_GROUPS;
	}
}

static void
mlx5_eswitch_update_num_of_vfs(struct mlx5_eswitch *esw, int num_vfs)
{
	const u32 *out;

	WARN_ON_ONCE(esw->mode != MLX5_ESWITCH_NONE);

	if (num_vfs < 0)
		return;

	if (!mlx5_core_is_ecpf_esw_manager(esw->dev)) {
		esw->esw_funcs.num_vfs = num_vfs;
		return;
	}

	out = mlx5_esw_query_functions(esw->dev);
	if (IS_ERR(out))
		return;

	esw->esw_funcs.num_vfs = MLX5_GET(query_esw_functions_out, out,
					  host_params_context.host_num_of_vfs);
	kvfree(out);
}

/**
 * mlx5_eswitch_enable_locked - Enable eswitch
 * @esw:	Pointer to eswitch
 * @mode:	Eswitch mode to enable
 * @num_vfs:	Enable eswitch for given number of VFs. This is optional.
 *		Valid values are 0, > 0 and MLX5_ESWITCH_IGNORE_NUM_VFS.
 *		Caller should pass num_vfs > 0 when enabling eswitch for
 *		vf vports. Caller should pass num_vfs = 0, when eswitch
 *		is enabled without sriov VFs or when caller
 *		is unaware of the sriov state of the host PF on ECPF based
 *		eswitch. Caller should pass < 0 when num_vfs should be
 *		completely ignored. This is typically the case when eswitch
 *		is enabled without sriov regardless of PF/ECPF system.
 * mlx5_eswitch_enable_locked() enables the eswitch in either legacy or
 * offloads mode. If num_vfs >= 0 is provided, it sets up the VF related
 * eswitch vports.
 * It returns 0 on success or error code on failure.
 */
int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
{
	int err;

	lockdep_assert_held(&esw->mode_lock);

	if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
		esw_warn(esw->dev, "FDB is not supported, aborting ...\n");
		return -EOPNOTSUPP;
	}

	if (!MLX5_CAP_ESW_INGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "ingress ACL is not supported by FW\n");

	if (!MLX5_CAP_ESW_EGRESS_ACL(esw->dev, ft_support))
		esw_warn(esw->dev, "egress ACL is not supported by FW\n");

	mlx5_eswitch_get_devlink_param(esw);

	mlx5_eswitch_update_num_of_vfs(esw, num_vfs);

	esw_create_tsar(esw);

	esw->mode = mode;

	mlx5_lag_update(esw->dev);

	if (mode == MLX5_ESWITCH_LEGACY) {
		err = esw_legacy_enable(esw);
	} else {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		err = esw_offloads_enable(esw);
	}

	if (err)
		goto abort;

	mlx5_eswitch_event_handlers_register(esw);

	esw_info(esw->dev, "Enable: mode(%s), nvfs(%d), active vports(%d)\n",
		 mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	return 0;

abort:
	esw->mode = MLX5_ESWITCH_NONE;

	if (mode == MLX5_ESWITCH_OFFLOADS) {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
	}
	esw_destroy_tsar(esw);
	return err;
}

/**
 * mlx5_eswitch_enable - Enable eswitch
 * @esw:	Pointer to eswitch
 * @num_vfs:	Enable eswitch for the given number of VFs.
 *		Caller must pass num_vfs > 0 when enabling eswitch for
 *		vf vports.
 * mlx5_eswitch_enable() returns 0 on success or error code on failure.
 */
int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs)
{
	int ret;

	if (!ESW_ALLOWED(esw))
		return 0;

	mutex_lock(&esw->mode_lock);
	if (esw->mode == MLX5_ESWITCH_NONE) {
		ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs);
	} else {
		enum mlx5_eswitch_vport_event vport_events;

		vport_events = (esw->mode == MLX5_ESWITCH_LEGACY) ?
					MLX5_LEGACY_SRIOV_VPORT_EVENTS : MLX5_VPORT_UC_ADDR_CHANGE;
		ret = mlx5_eswitch_load_vf_vports(esw, num_vfs, vport_events);
		if (!ret)
			esw->esw_funcs.num_vfs = num_vfs;
	}
	mutex_unlock(&esw->mode_lock);
	return ret;
}

void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
{
	int old_mode;

	lockdep_assert_held_write(&esw->mode_lock);

	if (esw->mode == MLX5_ESWITCH_NONE)
		return;

	esw_info(esw->dev, "Disable: mode(%s), nvfs(%d), active vports(%d)\n",
		 esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS",
		 esw->esw_funcs.num_vfs, esw->enabled_vports);

	mlx5_eswitch_event_handlers_unregister(esw);

	if (esw->mode == MLX5_ESWITCH_LEGACY)
		esw_legacy_disable(esw);
	else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
		esw_offloads_disable(esw);

	old_mode = esw->mode;
	esw->mode = MLX5_ESWITCH_NONE;

	mlx5_lag_update(esw->dev);

	if (old_mode == MLX5_ESWITCH_OFFLOADS) {
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
		mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
	}
	esw_destroy_tsar(esw);

	if (clear_vf)
		mlx5_eswitch_clear_vf_vports_info(esw);
}

void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf)
{
	if (!ESW_ALLOWED(esw))
		return;

	mutex_lock(&esw->mode_lock);
	mlx5_eswitch_disable_locked(esw, clear_vf);
	esw->esw_funcs.num_vfs = 0;
	mutex_unlock(&esw->mode_lock);
}

int mlx5_eswitch_init(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int total_vports;
	int err, i;

	if (!MLX5_VPORT_MANAGER(dev))
		return 0;

	total_vports = mlx5_eswitch_get_total_vports(dev);

	esw_info(dev,
		 "Total vports %d, per vport: max uc(%d) max mc(%d)\n",
		 total_vports,
		 MLX5_MAX_UC_PER_VPORT(dev),
		 MLX5_MAX_MC_PER_VPORT(dev));

	esw = kzalloc(sizeof(*esw), GFP_KERNEL);
	if (!esw)
		return -ENOMEM;

	esw->dev = dev;
	esw->manager_vport = mlx5_eswitch_manager_vport(dev);
	esw->first_host_vport = mlx5_eswitch_first_host_vport_num(dev);

	esw->work_queue = create_singlethread_workqueue("mlx5_esw_wq");
	if (!esw->work_queue) {
		err = -ENOMEM;
		goto abort;
	}

	esw->vports = kcalloc(total_vports, sizeof(struct mlx5_vport),
			      GFP_KERNEL);
	if (!esw->vports) {
		err = -ENOMEM;
		goto abort;
	}

	esw->total_vports = total_vports;

	err = esw_offloads_init_reps(esw);
	if (err)
		goto abort;

	mutex_init(&esw->offloads.encap_tbl_lock);
	hash_init(esw->offloads.encap_tbl);
	mutex_init(&esw->offloads.decap_tbl_lock);
	hash_init(esw->offloads.decap_tbl);
	mlx5e_mod_hdr_tbl_init(&esw->offloads.mod_hdr);
	atomic64_set(&esw->offloads.num_flows, 0);
	ida_init(&esw->offloads.vport_metadata_ida);
	mutex_init(&esw->state_lock);
	mutex_init(&esw->mode_lock);

	mlx5_esw_for_all_vports(esw, i, vport) {
		vport->vport = mlx5_eswitch_index_to_vport_num(esw, i);
		vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
		vport->dev = dev;
		INIT_WORK(&vport->vport_change_handler,
			  esw_vport_change_handler);
	}

	esw->enabled_vports = 0;
	esw->mode = MLX5_ESWITCH_NONE;
	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;

	dev->priv.eswitch = esw;
	return 0;
abort:
	if (esw->work_queue)
		destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	kfree(esw->vports);
	kfree(esw);
	return err;
}

void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw)
{
	if (!esw || !MLX5_VPORT_MANAGER(esw->dev))
		return;

	esw_info(esw->dev, "cleanup\n");

	esw->dev->priv.eswitch = NULL;
	destroy_workqueue(esw->work_queue);
	esw_offloads_cleanup_reps(esw);
	mutex_destroy(&esw->mode_lock);
	mutex_destroy(&esw->state_lock);
	ida_destroy(&esw->offloads.vport_metadata_ida);
	mlx5e_mod_hdr_tbl_destroy(&esw->offloads.mod_hdr);
	mutex_destroy(&esw->offloads.encap_tbl_lock);
	mutex_destroy(&esw->offloads.decap_tbl_lock);
	kfree(esw->vports);
	kfree(esw);
}

/* Vport Administration */
static int
mlx5_esw_set_vport_mac_locked(struct mlx5_eswitch *esw,
			      struct mlx5_vport *evport, const u8 *mac)
{
	u16 vport_num = evport->vport;
	u64 node_guid;
	int err = 0;

	if (is_multicast_ether_addr(mac))
		return -EINVAL;

	if (evport->info.spoofchk && !is_valid_ether_addr(mac))
		mlx5_core_warn(esw->dev,
			       "Set invalid MAC while spoofchk is on, vport(%d)\n",
			       vport_num);

	err = mlx5_modify_nic_vport_mac_address(esw->dev, vport_num, mac);
	if (err) {
		mlx5_core_warn(esw->dev,
			       "Failed to mlx5_modify_nic_vport_mac vport(%d) err=(%d)\n",
			       vport_num, err);
		return err;
	}

	node_guid_gen_from_mac(&node_guid, mac);
	err = mlx5_modify_nic_vport_node_guid(esw->dev, vport_num, node_guid);
	if (err)
		mlx5_core_warn(esw->dev,
			       "Failed to set vport %d node guid, err = %d. RDMA_CM will not function properly for this VF.\n",
			       vport_num, err);

	ether_addr_copy(evport->info.mac, mac);
	evport->info.node_guid = node_guid;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);

	return err;
}

int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
			       u16 vport, const u8 *mac)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	err = mlx5_esw_set_vport_mac_locked(esw, evport, mac);
	mutex_unlock(&esw->state_lock);
	return err;
}

static bool
is_port_function_supported(const struct mlx5_eswitch *esw, u16 vport_num)
{
	return vport_num == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num);
}

int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink,
					   struct devlink_port *port,
					   u8 *hw_addr, int *hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw))
		return PTR_ERR(esw);

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num))
		return -EOPNOTSUPP;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled) {
		ether_addr_copy(hw_addr, vport->info.mac);
		*hw_addr_len = ETH_ALEN;
		err = 0;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
	}
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink,
					   struct devlink_port *port,
					   const u8 *hw_addr, int hw_addr_len,
					   struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw;
	struct mlx5_vport *vport;
	int err = -EOPNOTSUPP;
	u16 vport_num;

	esw = mlx5_devlink_eswitch_get(devlink);
	if (IS_ERR(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr");
		return PTR_ERR(esw);
	}

	vport_num = mlx5_esw_devlink_port_index_to_vport_num(port->index);
	if (!is_port_function_supported(esw, vport_num)) {
		NL_SET_ERR_MSG_MOD(extack, "Port doesn't support set hw_addr");
		return -EINVAL;
	}
	vport = mlx5_eswitch_get_vport(esw, vport_num);
	if (IS_ERR(vport)) {
		NL_SET_ERR_MSG_MOD(extack, "Invalid port");
		return PTR_ERR(vport);
	}

	mutex_lock(&esw->state_lock);
	if (vport->enabled)
		err = mlx5_esw_set_vport_mac_locked(esw, vport, hw_addr);
	else
		NL_SET_ERR_MSG_MOD(extack, "Eswitch vport is disabled");
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
				 u16 vport, int link_state)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
	int other_vport = 1;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	if (vport == MLX5_VPORT_UPLINK) {
		opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
		other_vport = 0;
		vport = 0;
	}
	mutex_lock(&esw->state_lock);

	err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
	if (err) {
		mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
			       vport, opmod, err);
		goto unlock;
	}

	evport->info.link_state = link_state;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
				  u16 vport, struct ifla_vf_info *ivi)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (IS_ERR(evport))
		return PTR_ERR(evport);

	memset(ivi, 0, sizeof(*ivi));
	ivi->vf = vport - 1;

	mutex_lock(&esw->state_lock);
	ether_addr_copy(ivi->mac, evport->info.mac);
	ivi->linkstate = evport->info.link_state;
	ivi->vlan = evport->info.vlan;
	ivi->qos = evport->info.qos;
	ivi->spoofchk = evport->info.spoofchk;
	ivi->trusted = evport->info.trusted;
	ivi->min_tx_rate = evport->info.min_rate;
	ivi->max_tx_rate = evport->info.max_rate;
	mutex_unlock(&esw->state_lock);

	return 0;
}

int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				  u16 vport, u16 vlan, u8 qos, u8 set_flags)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	int err = 0;

	if (IS_ERR(evport))
		return PTR_ERR(evport);
	if (vlan > 4095 || qos > 7)
		return -EINVAL;

	err = modify_esw_vport_cvlan(esw->dev, vport, vlan, qos, set_flags);
	if (err)
		return err;

	evport->info.vlan = vlan;
	evport->info.qos = qos;
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY) {
		err = esw_acl_ingress_lgcy_setup(esw, evport);
		if (err)
			return err;
		err = esw_acl_egress_lgcy_setup(esw, evport);
	}

	return err;
}

int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
				u16 vport, u16 vlan, u8 qos)
{
	u8 set_flags = 0;
	int err;

	if (!ESW_ALLOWED(esw))
		return -EPERM;

	if (vlan || qos)
		set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

	mutex_lock(&esw->state_lock);
	err = __mlx5_eswitch_set_vport_vlan(esw, vport, vlan, qos, set_flags);
	mutex_unlock(&esw->state_lock);

	return err;
}

int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
				    u16 vport, bool spoofchk)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	bool pschk;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	pschk = evport->info.spoofchk;
	evport->info.spoofchk = spoofchk;
	if (pschk && !is_valid_ether_addr(evport->info.mac))
		mlx5_core_warn(esw->dev,
			       "Spoofchk is set while MAC is invalid, vport(%d)\n",
			       evport->vport);
	if (evport->enabled && esw->mode == MLX5_ESWITCH_LEGACY)
		err = esw_acl_ingress_lgcy_setup(esw, evport);
	if (err)
		evport->info.spoofchk = pschk;
	mutex_unlock(&esw->state_lock);

	return err;
}

static void esw_cleanup_vepa_rules(struct mlx5_eswitch *esw)
{
	if (esw->fdb_table.legacy.vepa_uplink_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_uplink_rule);

	if (esw->fdb_table.legacy.vepa_star_rule)
		mlx5_del_flow_rules(esw->fdb_table.legacy.vepa_star_rule);

	esw->fdb_table.legacy.vepa_uplink_rule = NULL;
	esw->fdb_table.legacy.vepa_star_rule = NULL;
}

static int _mlx5_eswitch_set_vepa_locked(struct mlx5_eswitch *esw,
					 u8 setting)
{
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {};
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_spec *spec;
	int err = 0;
	void *misc;

	if (!setting) {
		esw_cleanup_vepa_rules(esw);
		return 0;
	}

	if (esw->fdb_table.legacy.vepa_uplink_rule)
		return 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;

	/* Uplink rule forwards uplink traffic to the FDB */
	misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
	MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK);

	misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
	MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port);

	spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS;
	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = esw->fdb_table.legacy.fdb;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, spec,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	} else {
		esw->fdb_table.legacy.vepa_uplink_rule = flow_rule;
	}

	/* Star rule to forward all traffic to uplink vport */
	memset(&dest, 0, sizeof(dest));
	dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
	dest.vport.num = MLX5_VPORT_UPLINK;
	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	flow_rule = mlx5_add_flow_rules(esw->fdb_table.legacy.vepa_fdb, NULL,
					&flow_act, &dest, 1);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto out;
	} else {
		esw->fdb_table.legacy.vepa_star_rule = flow_rule;
	}

out:
	kvfree(spec);
	if (err)
		esw_cleanup_vepa_rules(esw);
	return err;
}

int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting)
{
	int err = 0;

	if (!esw)
		return -EOPNOTSUPP;

	if (!ESW_ALLOWED(esw))
		return -EPERM;

	mutex_lock(&esw->state_lock);
	if (esw->mode != MLX5_ESWITCH_LEGACY) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = _mlx5_eswitch_set_vepa_locked(esw, setting);

out:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
	if (!esw)
		return -EOPNOTSUPP;

	if (!ESW_ALLOWED(esw))
		return -EPERM;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return -EOPNOTSUPP;

	*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
	return 0;
}

int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
				 u16 vport, bool setting)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	mutex_lock(&esw->state_lock);
	evport->info.trusted = setting;
	if (evport->enabled)
		esw_vport_change_handle_locked(evport);
	mutex_unlock(&esw->state_lock);

	return 0;
}

static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 max_guarantee = 0;
	int i;

	mlx5_esw_for_all_vports(esw, i, evport) {
		if (!evport->enabled || evport->info.min_rate < max_guarantee)
			continue;
		max_guarantee = evport->info.min_rate;
	}

	return max_t(u32, max_guarantee / fw_max_bw_share, 1);
}

static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
{
	u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	struct mlx5_vport *evport;
	u32 vport_max_rate;
	u32 vport_min_rate;
	u32 bw_share;
	int err;
	int i;

	mlx5_esw_for_all_vports(esw, i, evport) {
		if (!evport->enabled)
			continue;
		vport_min_rate = evport->info.min_rate;
		vport_max_rate = evport->info.max_rate;
		bw_share = MLX5_MIN_BW_SHARE;

		if (vport_min_rate)
			bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
							 divider,
							 fw_max_bw_share);

		if (bw_share == evport->qos.bw_share)
			continue;

		err = esw_vport_qos_config(esw, evport, vport_max_rate,
					   bw_share);
		if (!err)
			evport->qos.bw_share = bw_share;
		else
			return err;
	}

	return 0;
}

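/* Illustrative arithmetic for the normalization above (assuming
 * MLX5_RATE_TO_BW_SHARE rounds min_rate/divider up and clamps the result
 * to at least MLX5_MIN_BW_SHARE): with fw_max_bw_share = 100 and two
 * enabled vports guaranteeing 2000 and 1000 Mbps, the divider is
 * max(2000 / 100, 1) = 20, yielding bw_share values of 100 and 50
 * respectively, i.e. a 2:1 DWRR weighting on the root TSAR.
 */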
int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport,
				u32 max_rate, u32 min_rate)
{
	struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
	u32 fw_max_bw_share;
	u32 previous_min_rate;
	u32 divider;
	bool min_rate_supported;
	bool max_rate_supported;
	int err = 0;

	if (!ESW_ALLOWED(esw))
		return -EPERM;
	if (IS_ERR(evport))
		return PTR_ERR(evport);

	fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
	min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
				fw_max_bw_share >= MLX5_MIN_BW_SHARE;
	max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);

	if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
		return -EOPNOTSUPP;

	mutex_lock(&esw->state_lock);

	if (min_rate == evport->info.min_rate)
		goto set_max_rate;

	previous_min_rate = evport->info.min_rate;
	evport->info.min_rate = min_rate;
	divider = calculate_vports_min_rate_divider(esw);
	err = normalize_vports_min_rate(esw, divider);
	if (err) {
		evport->info.min_rate = previous_min_rate;
		goto unlock;
	}

set_max_rate:
	if (max_rate == evport->info.max_rate)
		goto unlock;

	err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share);
	if (!err)
		evport->info.max_rate = max_rate;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

static int mlx5_eswitch_query_vport_drop_stats(struct mlx5_core_dev *dev,
					       struct mlx5_vport *vport,
					       struct mlx5_vport_drop_stats *stats)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	u64 rx_discard_vport_down, tx_discard_vport_down;
	u64 bytes = 0;
	int err = 0;

	if (esw->mode != MLX5_ESWITCH_LEGACY)
		return 0;

	mutex_lock(&esw->state_lock);
	if (!vport->enabled)
		goto unlock;

	if (!IS_ERR_OR_NULL(vport->egress.legacy.drop_counter))
		mlx5_fc_query(dev, vport->egress.legacy.drop_counter,
			      &stats->rx_dropped, &bytes);

	if (vport->ingress.legacy.drop_counter)
		mlx5_fc_query(dev, vport->ingress.legacy.drop_counter,
			      &stats->tx_dropped, &bytes);

	if (!MLX5_CAP_GEN(dev, receive_discard_vport_down) &&
	    !MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		goto unlock;

	err = mlx5_query_vport_down_stats(dev, vport->vport, 1,
					  &rx_discard_vport_down,
					  &tx_discard_vport_down);
	if (err)
		goto unlock;

	if (MLX5_CAP_GEN(dev, receive_discard_vport_down))
		stats->rx_dropped += rx_discard_vport_down;
	if (MLX5_CAP_GEN(dev, transmit_discard_vport_down))
		stats->tx_dropped += tx_discard_vport_down;

unlock:
	mutex_unlock(&esw->state_lock);
	return err;
}

int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
				 u16 vport_num,
				 struct ifla_vf_stats *vf_stats)
{
	struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num);
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {};
	struct mlx5_vport_drop_stats stats = {};
	int err = 0;
	u32 *out;

	if (IS_ERR(vport))
		return PTR_ERR(vport);

	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, vport_number, vport->vport);
	MLX5_SET(query_vport_counter_in, in, other_vport, 1);

	err = mlx5_cmd_exec_inout(esw->dev, query_vport_counter, in, out);
	if (err)
		goto free_out;

#define MLX5_GET_CTR(p, x) \
	MLX5_GET64(query_vport_counter_out, p, x)

	memset(vf_stats, 0, sizeof(*vf_stats));
	vf_stats->rx_packets =
		MLX5_GET_CTR(out, received_eth_unicast.packets) +
		MLX5_GET_CTR(out, received_ib_unicast.packets) +
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets) +
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	vf_stats->rx_bytes =
		MLX5_GET_CTR(out, received_eth_unicast.octets) +
		MLX5_GET_CTR(out, received_ib_unicast.octets) +
		MLX5_GET_CTR(out, received_eth_multicast.octets) +
		MLX5_GET_CTR(out, received_ib_multicast.octets) +
		MLX5_GET_CTR(out, received_eth_broadcast.octets);

	vf_stats->tx_packets =
		MLX5_GET_CTR(out, transmitted_eth_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.packets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);

	vf_stats->tx_bytes =
		MLX5_GET_CTR(out, transmitted_eth_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_unicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_ib_multicast.octets) +
		MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);

	vf_stats->multicast =
		MLX5_GET_CTR(out, received_eth_multicast.packets) +
		MLX5_GET_CTR(out, received_ib_multicast.packets);

	vf_stats->broadcast =
		MLX5_GET_CTR(out, received_eth_broadcast.packets);

	err = mlx5_eswitch_query_vport_drop_stats(esw->dev, vport, &stats);
	if (err)
		goto free_out;
	vf_stats->rx_dropped = stats.rx_dropped;
	vf_stats->tx_dropped = stats.tx_dropped;

free_out:
	kvfree(out);
	return err;
}

u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
	return ESW_ALLOWED(esw) ? esw->mode : MLX5_ESWITCH_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);

enum devlink_eswitch_encap_mode
mlx5_eswitch_get_encap_mode(const struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw;

	esw = dev->priv.eswitch;
	return ESW_ALLOWED(esw) ? esw->offloads.encap :
		DEVLINK_ESWITCH_ENCAP_MODE_NONE;
}
EXPORT_SYMBOL(mlx5_eswitch_get_encap_mode);

bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1)
{
	if ((dev0->priv.eswitch->mode == MLX5_ESWITCH_NONE &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE) ||
	    (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
	     dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS))
		return true;

	return false;
}

bool mlx5_esw_multipath_prereq(struct mlx5_core_dev *dev0,
			       struct mlx5_core_dev *dev1)
{
	return (dev0->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS &&
		dev1->priv.eswitch->mode == MLX5_ESWITCH_OFFLOADS);
}