/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
#include <net/devlink.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "en/tc_tun.h"
#include "fs_core.h"
#include "lib/port_tun.h"

#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \
	max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

struct mlx5e_rep_indr_block_priv {
	struct net_device *netdev;
	struct mlx5e_rep_priv *rpriv;

	struct list_head list;
};

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev);

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
		 "%d.%d.%04d (%.16s)",
		 fw_rev_maj(mdev), fw_rev_min(mdev),
		 fw_rev_sub(mdev), mdev->board_id);
}

static void mlx5e_uplink_rep_get_drvinfo(struct net_device *dev,
					 struct ethtool_drvinfo *drvinfo)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_rep_get_drvinfo(dev, drvinfo);
	strlcpy(drvinfo->bus_info, pci_name(priv->mdev->pdev),
		sizeof(drvinfo->bus_info));
}
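
/* ethtool counters exposed for a representor: software stats folded over
 * all channels, followed by the e-switch vport counters read from firmware.
 */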
static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

struct vport_stats {
	u64 vport_rx_packets;
	u64 vport_tx_packets;
	u64 vport_rx_bytes;
	u64 vport_tx_bytes;
};

static const struct counter_desc vport_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
	{ MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       vport_rep_stats_desc[j].format);
		break;
	}
}

static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes = vf_stats.rx_bytes;
}

static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct rtnl_link_stats64 *vport_stats;

	mlx5e_grp_802_3_update_stats(priv);

	vport_stats = &priv->stats.vf_vport;

	vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
	vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok);
	vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
	vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct rtnl_link_stats64 stats64 = {};

	memset(s, 0, sizeof(*s));
	mlx5e_fold_sw_stats64(priv, &stats64);

	s->rx_packets = stats64.rx_packets;
	s->rx_bytes = stats64.rx_bytes;
	s->tx_packets = stats64.tx_packets;
	s->tx_bytes = stats64.tx_bytes;
	s->tx_queue_dropped = stats64.tx_dropped;
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i, j;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	mlx5e_rep_update_sw_counters(priv);
	priv->profile->update_stats(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);

	for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
					       vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_get_ringparam(struct net_device *dev,
				    struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_ringparam(priv, param);
}

static int mlx5e_rep_set_ringparam(struct net_device *dev,
				   struct ethtool_ringparam *param)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	return mlx5e_ethtool_set_ringparam(priv, param);
}
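
/* Replace the representor's vport RX steering rule with one pointing at a
 * new destination; the old rule is deleted only after the new one exists.
 */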
static int mlx5e_replace_rep_vport_rx_rule(struct mlx5e_priv *priv,
					   struct mlx5_flow_destination *dest)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static void mlx5e_rep_get_channels(struct net_device *dev,
				   struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_ethtool_get_channels(priv, ch);
}

static int mlx5e_rep_set_channels(struct net_device *dev,
				  struct ethtool_channels *ch)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	u16 curr_channels_amount = priv->channels.params.num_channels;
	u32 new_channels_amount = ch->combined_count;
	struct mlx5_flow_destination new_dest;
	int err = 0;

	err = mlx5e_ethtool_set_channels(priv, ch);
	if (err)
		return err;

	if (curr_channels_amount == 1 && new_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		new_dest.ft = priv->fs.ttc.ft.t;
	} else if (new_channels_amount == 1 && curr_channels_amount > 1) {
		new_dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		new_dest.tir_num = priv->direct_tir[0].tirn;
	} else {
		return 0;
	}

	err = mlx5e_replace_rep_vport_rx_rule(priv, &new_dest);
	if (err)
		netdev_warn(priv->netdev, "Failed to update vport rx rule, when going from (%d) channels to (%d) channels\n",
			    curr_channels_amount, new_channels_amount);

	return err;
}

static int mlx5e_rep_get_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_coalesce(priv, coal);
}

static int mlx5e_rep_set_coalesce(struct net_device *netdev,
				  struct ethtool_coalesce *coal)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_coalesce(priv, coal);
}

static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_key_size(priv);
}

static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_rxfh_indir_size(priv);
}

static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev,
					    struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5e_ethtool_get_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev,
					   struct ethtool_pauseparam *pauseparam)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_pauseparam(priv, pauseparam);
}

static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev,
					       struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
}

static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev,
					       const struct ethtool_link_ksettings *link_ksettings)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings);
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo = mlx5e_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam = mlx5e_rep_get_ringparam,
	.set_ringparam = mlx5e_rep_set_ringparam,
	.get_channels = mlx5e_rep_get_channels,
	.set_channels = mlx5e_rep_set_channels,
	.get_coalesce = mlx5e_rep_get_coalesce,
	.set_coalesce = mlx5e_rep_set_coalesce,
	.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
};

static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = {
	.get_drvinfo = mlx5e_uplink_rep_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_strings = mlx5e_rep_get_strings,
	.get_sset_count = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
	.get_ringparam = mlx5e_rep_get_ringparam,
	.set_ringparam = mlx5e_rep_set_ringparam,
	.get_channels = mlx5e_rep_get_channels,
	.set_channels = mlx5e_rep_set_channels,
	.get_coalesce = mlx5e_rep_get_coalesce,
	.set_coalesce = mlx5e_rep_set_coalesce,
	.get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings,
	.set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings,
	.get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size,
	.get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size,
	.get_pauseparam = mlx5e_uplink_rep_get_pauseparam,
	.set_pauseparam = mlx5e_uplink_rep_set_pauseparam,
};

static int mlx5e_rep_get_port_parent_id(struct net_device *dev,
					struct netdev_phys_item_id *ppid)
{
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 parent_id;

	priv = netdev_priv(dev);
	esw = priv->mdev->priv.eswitch;

	if (esw->mode == MLX5_ESWITCH_NONE)
		return -EOPNOTSUPP;

	parent_id = mlx5_query_nic_system_image_guid(priv->mdev);
	ppid->id_len = sizeof(parent_id);
	memcpy(ppid->id, &parent_id, sizeof(parent_id));

	return 0;
}

static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_sq *rep_sq, *tmp;
	struct mlx5e_rep_priv *rpriv;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
		mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
		list_del(&rep_sq->list);
		kfree(rep_sq);
	}
}
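
/* Install an e-switch send-to-vport rule for every SQ of the representor,
 * so traffic sent on it is forwarded to the vport it represents.
 */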
static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
				 struct mlx5_eswitch_rep *rep,
				 u32 *sqns_array, int sqns_num)
{
	struct mlx5_flow_handle *flow_rule;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_rep_sq *rep_sq;
	int err;
	int i;

	if (esw->mode != MLX5_ESWITCH_OFFLOADS)
		return 0;

	rpriv = mlx5e_rep_to_rep_priv(rep);
	for (i = 0; i < sqns_num; i++) {
		rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
		if (!rep_sq) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Add re-inject rule to the PF/representor sqs */
		flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
								rep->vport,
								sqns_array[i]);
		if (IS_ERR(flow_rule)) {
			err = PTR_ERR(flow_rule);
			kfree(rep_sq);
			goto out_err;
		}
		rep_sq->send_to_vport_rule = flow_rule;
		list_add(&rep_sq->list, &rpriv->vport_sqs_list);
	}
	return 0;

out_err:
	mlx5e_sqs2vport_stop(esw, rep);
	return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u32 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_sqs2vport_stop(esw, rep);
}
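
/* Neighbour update handling: the flow-counter sampling interval tracks the
 * smallest DELAY_PROBE_TIME configured on the IPv4/IPv6 neigh tables.
 */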
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((e->flags & MLX5_ENCAP_ENTRY_VALID) &&
	    (!neigh_connected || !ether_addr_equal(e->h_dest, ha)))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);
		/* Update the encap source mac, in case we delete the flows
		 * when the encap source mac changes.
		 */
		ether_addr_copy(eth->h_source, e->route_dev->dev_addr);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}
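
/* Work handler: runs under RTNL and syncs offloaded encap flows with the
 * current neighbour state and hardware address.
 */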
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_rep_indr_block_priv *
mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv,
				 struct net_device *netdev)
{
	struct mlx5e_rep_indr_block_priv *cb_priv;

	/* All callback list access should be protected by RTNL. */
	ASSERT_RTNL();

	list_for_each_entry(cb_priv,
			    &rpriv->uplink_priv.tc_indr_block_priv_list,
			    list)
		if (cb_priv->netdev == netdev)
			return cb_priv;

	return NULL;
}

static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_rep_indr_block_priv *cb_priv, *temp;
	struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list;

	list_for_each_entry_safe(cb_priv, temp, head, list) {
		mlx5e_rep_indr_unregister_block(rpriv, cb_priv->netdev);
		kfree(cb_priv);
	}
}

static int
mlx5e_rep_indr_offload(struct net_device *netdev,
		       struct flow_cls_offload *flower,
		       struct mlx5e_rep_indr_block_priv *indr_priv)
{
	struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev);
	int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD;
	int err = 0;

	switch (flower->command) {
	case FLOW_CLS_REPLACE:
		err = mlx5e_configure_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_DESTROY:
		err = mlx5e_delete_flower(netdev, priv, flower, flags);
		break;
	case FLOW_CLS_STATS:
		err = mlx5e_stats_flower(netdev, priv, flower, flags);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type,
					 void *type_data, void *indr_priv)
{
	struct mlx5e_rep_indr_block_priv *priv = indr_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_indr_offload(priv->netdev, type_data, priv);
	default:
		return -EOPNOTSUPP;
	}
}

static void mlx5e_rep_indr_tc_block_unbind(void *cb_priv)
{
	struct mlx5e_rep_indr_block_priv *indr_priv = cb_priv;

	list_del(&indr_priv->list);
	kfree(indr_priv);
}
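
/* Indirect TC block offload: lets the uplink representor offload filters
 * installed on tunnel devices (and vlan devices on top of the uplink).
 */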
static LIST_HEAD(mlx5e_block_cb_list);

static int
mlx5e_rep_indr_setup_tc_block(struct net_device *netdev,
			      struct mlx5e_rep_priv *rpriv,
			      struct flow_block_offload *f)
{
	struct mlx5e_rep_indr_block_priv *indr_priv;
	struct flow_block_cb *block_cb;

	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	f->driver_block_list = &mlx5e_block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (indr_priv)
			return -EEXIST;

		if (flow_block_cb_is_busy(mlx5e_rep_indr_setup_block_cb,
					  indr_priv, &mlx5e_block_cb_list))
			return -EBUSY;

		indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL);
		if (!indr_priv)
			return -ENOMEM;

		indr_priv->netdev = netdev;
		indr_priv->rpriv = rpriv;
		list_add(&indr_priv->list,
			 &rpriv->uplink_priv.tc_indr_block_priv_list);

		block_cb = flow_block_cb_alloc(mlx5e_rep_indr_setup_block_cb,
					       indr_priv, indr_priv,
					       mlx5e_rep_indr_tc_block_unbind);
		if (IS_ERR(block_cb)) {
			list_del(&indr_priv->list);
			kfree(indr_priv);
			return PTR_ERR(block_cb);
		}
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &mlx5e_block_cb_list);

		return 0;
	case FLOW_BLOCK_UNBIND:
		indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev);
		if (!indr_priv)
			return -ENOENT;

		block_cb = flow_block_cb_lookup(f->block,
						mlx5e_rep_indr_setup_block_cb,
						indr_priv);
		if (!block_cb)
			return -ENOENT;

		flow_block_cb_remove(block_cb, f);
		list_del(&block_cb->driver_list);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

static
int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
			       enum tc_setup_type type, void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv,
						     type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv,
					 struct net_device *netdev)
{
	int err;

	err = __tc_indr_block_cb_register(netdev, rpriv,
					  mlx5e_rep_indr_setup_tc_cb,
					  rpriv);
	if (err) {
		struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

		mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n",
			      netdev_name(netdev), err);
	}
	return err;
}

static void mlx5e_rep_indr_unregister_block(struct mlx5e_rep_priv *rpriv,
					    struct net_device *netdev)
{
	__tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb,
				      rpriv);
}

static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb,
					 unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    uplink_priv.netdevice_nb);
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
	struct net_device *netdev = netdev_notifier_info_to_dev(ptr);

	if (!mlx5e_tc_tun_device_to_offload(priv, netdev) &&
	    !(is_vlan_dev(netdev) && vlan_dev_real_dev(netdev) == rpriv->netdev))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_REGISTER:
		mlx5e_rep_indr_register_block(rpriv, netdev);
		break;
	case NETDEV_UNREGISTER:
		mlx5e_rep_indr_unregister_block(rpriv, netdev);
		break;
	}
	return NOTIFY_OK;
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done per device delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}
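
/* Attach a tunnel encap entry to the hash entry tracking its neighbour;
 * the neigh hash entry is created on first use.
 */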
int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	err = mlx5_tun_entropy_refcount_inc(tun_entropy, e->reformat_type);
	if (err)
		return err;
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err) {
			mlx5_tun_entropy_refcount_dec(tun_entropy,
						      e->reformat_type);
			return err;
		}
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv = &rpriv->uplink_priv;
	struct mlx5_tun_entropy *tun_entropy = &uplink_priv->tun_entropy;
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
	mlx5_tun_entropy_refcount_dec(tun_entropy, e->reformat_type);
}

static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_modify_vport_admin_state(priv->mdev,
					   MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
					   rep->vport, 1,
					   MLX5_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	mutex_lock(&priv->state_lock);
	mlx5_modify_vport_admin_state(priv->mdev,
				      MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
				      rep->vport, 1,
				      MLX5_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct flow_cls_offload *cls_flower, int flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS |
						     MLX5E_TC_ESW_OFFLOAD);
	default:
		return -EOPNOTSUPP;
	}
}
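
/* TC block offload on the representor netdev itself (ingress) */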
static LIST_HEAD(mlx5e_rep_block_cb_list);

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &mlx5e_rep_block_cb_list,
						  mlx5e_rep_setup_tc_cb,
						  priv, priv, true);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_ESWITCH_MANAGER(priv->mdev))
		return false;

	if (!rpriv) /* non vport rep mlx5e instances don't use this field */
		return false;

	rep = rpriv->rep;
	return (rep->vport == MLX5_VPORT_UPLINK);
}

static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	mlx5e_fold_sw_stats64(priv, stats);
	return 0;
}

static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev,
				       void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* update HW stats in background for next time */
	mlx5e_queue_update_stats(priv);
	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static int mlx5e_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, NULL);
}

static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu)
{
	return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu);
}

static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr)
{
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	return 0;
}

static int mlx5e_uplink_rep_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
					__be16 vlan_proto)
{
	netdev_warn_once(dev, "legacy vf vlan setting isn't supported in switchdev mode\n");

	if (vlan != 0)
		return -EOPNOTSUPP;

	/* allow setting 0-vid for compatibility with libvirt */
	return 0;
}

static struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	return &rpriv->dl_port;
}

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open = mlx5e_rep_open,
	.ndo_stop = mlx5e_rep_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port = mlx5e_get_devlink_port,
	.ndo_get_stats64 = mlx5e_rep_get_stats,
	.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu = mlx5e_rep_change_mtu,
};

static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = {
	.ndo_open = mlx5e_open,
	.ndo_stop = mlx5e_close,
	.ndo_start_xmit = mlx5e_xmit,
	.ndo_set_mac_address = mlx5e_uplink_rep_set_mac,
	.ndo_setup_tc = mlx5e_rep_setup_tc,
	.ndo_get_devlink_port = mlx5e_get_devlink_port,
	.ndo_get_stats64 = mlx5e_get_stats,
	.ndo_has_offload_stats = mlx5e_rep_has_offload_stats,
	.ndo_get_offload_stats = mlx5e_rep_get_offload_stats,
	.ndo_change_mtu = mlx5e_uplink_rep_change_mtu,
	.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
	.ndo_features_check = mlx5e_features_check,
	.ndo_set_vf_mac = mlx5e_set_vf_mac,
	.ndo_set_vf_rate = mlx5e_set_vf_rate,
	.ndo_get_vf_config = mlx5e_get_vf_config,
	.ndo_get_vf_stats = mlx5e_get_vf_stats,
	.ndo_set_vf_vlan = mlx5e_uplink_rep_set_vf_vlan,
	.ndo_set_features = mlx5e_set_features,
};

bool mlx5e_eswitch_rep(struct net_device *netdev)
{
	if (netdev->netdev_ops == &mlx5e_netdev_ops_rep ||
	    netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep)
		return true;

	return false;
}
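
/* Representor netdevs default to a single channel; VF representors also use
 * a smaller SQ than the uplink representor.
 */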
static void mlx5e_build_rep_params(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_params *params;

	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params = &priv->channels.params;
	params->hard_mtu = MLX5E_ETH_HARD_MTU;
	params->sw_mtu = netdev->mtu;

	/* SQ */
	if (rep->vport == MLX5_VPORT_UPLINK)
		params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	else
		params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE;

	/* RQ */
	mlx5e_build_rq_params(mdev, params);

	/* CQ moderation params */
	params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->num_tc = 1;
	params->tunneled_offload_en = false;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);

	/* RSS */
	mlx5e_build_rss_params(&priv->rss_params, params->num_channels);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_core_dev *mdev = priv->mdev;

	if (rep->vport == MLX5_VPORT_UPLINK) {
		SET_NETDEV_DEV(netdev, mdev->device);
		netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep;
		/* we want a persistent mac for the uplink rep */
		mlx5_query_mac_address(mdev, netdev->dev_addr);
		netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		if (MLX5_CAP_GEN(mdev, qos))
			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_rep;
		eth_hw_addr_random(netdev);
		netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;
	}

	netdev->watchdog_timeo = 15 * HZ;

	netdev->features |= NETIF_F_NETNS_LOCAL;

	netdev->hw_features |= NETIF_F_HW_TC;
	netdev->hw_features |= NETIF_F_SG;
	netdev->hw_features |= NETIF_F_IP_CSUM;
	netdev->hw_features |= NETIF_F_IPV6_CSUM;
	netdev->hw_features |= NETIF_F_GRO;
	netdev->hw_features |= NETIF_F_TSO;
	netdev->hw_features |= NETIF_F_TSO6;
	netdev->hw_features |= NETIF_F_RXCSUM;

	if (rep->vport == MLX5_VPORT_UPLINK)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	else
		netdev->features |= NETIF_F_VLAN_CHALLENGED;

	netdev->features |= netdev->hw_features;
}

static int mlx5e_init_rep(struct mlx5_core_dev *mdev,
			  struct net_device *netdev,
			  const struct mlx5e_profile *profile,
			  void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv);
	if (err)
		return err;

	priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS;

	mlx5e_build_rep_params(netdev);
	mlx5e_build_rep_netdev(netdev);

	mlx5e_timestamp_init(priv);

	return 0;
}

static void mlx5e_cleanup_rep(struct mlx5e_priv *priv)
{
	mlx5e_netdev_cleanup(priv->netdev, priv);
}

static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv)
{
	struct ttc_params ttc_params = {};
	int tt, err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					      MLX5_FLOW_NAMESPACE_KERNEL);

	/* The inner_ttc in the ttc params is intentionally not set */
	ttc_params.any_tt_tirn = priv->direct_tir[0].tirn;
	mlx5e_set_ttc_ft_params(&ttc_params);
	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn;

	err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc);
	if (err) {
		netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_create_rep_vport_rx_rule(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	struct mlx5_flow_destination dest;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = priv->direct_tir[0].tirn;
	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      &dest);
	if (IS_ERR(flow_rule))
		return PTR_ERR(flow_rule);
	rpriv->vport_rx_rule = flow_rule;
	return 0;
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_open_drop_rq(priv, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		return err;
	}

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		goto err_close_drop_rq;

	err = mlx5e_create_direct_rqts(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv, false);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv, priv->direct_tir);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_rep_ttc_table(priv);
	if (err)
		goto err_destroy_direct_tirs;

	err = mlx5e_create_rep_vport_rx_rule(priv);
	if (err)
		goto err_destroy_ttc_table;

	return 0;

err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv, false);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	mlx5_del_flow_rules(rpriv->vport_rx_rule);
	mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
	mlx5e_destroy_direct_tirs(priv, priv->direct_tir);
	mlx5e_destroy_indirect_tirs(priv, false);
	mlx5e_destroy_direct_rqts(priv, priv->direct_tir);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	mlx5e_close_drop_rq(&priv->drop_rq);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_rep_uplink_priv *uplink_priv;
	int tc, err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		uplink_priv = &rpriv->uplink_priv;

		INIT_LIST_HEAD(&uplink_priv->unready_flows);

		/* init shared tc flow table */
		err = mlx5e_tc_esw_init(&uplink_priv->tc_ht);
		if (err)
			goto destroy_tises;

		mlx5_init_port_tun_entropy(&uplink_priv->tun_entropy, priv->mdev);

		/* init indirect block notifications */
		INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list);
		uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event;
		err = register_netdevice_notifier(&uplink_priv->netdevice_nb);
		if (err) {
			mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n");
			goto tc_esw_cleanup;
		}
	}

	return 0;

tc_esw_cleanup:
	mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht);
destroy_tises:
	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
	return err;
}

static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	if (rpriv->rep->vport == MLX5_VPORT_UPLINK) {
		/* clean indirect TC block notifications */
		unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb);
		mlx5e_rep_indr_clean_block_privs(rpriv);

		/* delete shared tc flow table */
		mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht);
	}
}

static void mlx5e_rep_enable(struct mlx5e_priv *priv)
{
	mlx5e_set_netdev_mtu_boundaries(priv);
}

static int mlx5e_update_rep_rx(struct mlx5e_priv *priv)
{
	return 0;
}
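
/* Uplink events: port state changes update the carrier; port affinity
 * changes re-trigger offloading of flows.
 */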
static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data)
{
	struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb);

	if (event == MLX5_EVENT_TYPE_PORT_CHANGE) {
		struct mlx5_eqe *eqe = data;

		switch (eqe->sub_type) {
		case MLX5_PORT_CHANGE_SUBTYPE_DOWN:
		case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE:
			queue_work(priv->wq, &priv->update_carrier_work);
			break;
		default:
			return NOTIFY_DONE;
		}

		return NOTIFY_OK;
	}

	if (event == MLX5_DEV_EVENT_PORT_AFFINITY) {
		struct mlx5e_rep_priv *rpriv = priv->ppriv;

		queue_work(priv->wq, &rpriv->uplink_priv.reoffload_flows_work);

		return NOTIFY_OK;
	}

	return NOTIFY_DONE;
}

static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u16 max_mtu;

	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	INIT_WORK(&rpriv->uplink_priv.reoffload_flows_work,
		  mlx5e_tc_reoffload_flows_work);

	mlx5_lag_add(mdev, netdev);
	priv->events_nb.notifier_call = uplink_rep_async_event;
	mlx5_notifier_register(mdev, &priv->events_nb);
#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
	mlx5e_dcbnl_init_app(priv);
#endif
}

static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_delete_app(priv);
#endif
	mlx5_notifier_unregister(mdev, &priv->events_nb);
	cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
	mlx5_lag_remove(mdev);
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init = mlx5e_init_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_rep_enable,
	.update_rx = mlx5e_update_rep_rx,
	.update_stats = mlx5e_rep_update_hw_counters,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc = 1,
	.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};

static const struct mlx5e_profile mlx5e_uplink_rep_profile = {
	.init = mlx5e_init_rep,
	.cleanup = mlx5e_cleanup_rep,
	.init_rx = mlx5e_init_rep_rx,
	.cleanup_rx = mlx5e_cleanup_rep_rx,
	.init_tx = mlx5e_init_rep_tx,
	.cleanup_tx = mlx5e_cleanup_rep_tx,
	.enable = mlx5e_uplink_rep_enable,
	.disable = mlx5e_uplink_rep_disable,
	.update_rx = mlx5e_update_rep_rx,
	.update_stats = mlx5e_uplink_rep_update_hw_counters,
	.update_carrier = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc = MLX5E_MAX_NUM_TC,
	.rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR),
};

static bool
is_devlink_port_supported(const struct mlx5_core_dev *dev,
			  const struct mlx5e_rep_priv *rpriv)
{
	return rpriv->rep->vport == MLX5_VPORT_UPLINK ||
	       rpriv->rep->vport == MLX5_VPORT_PF ||
	       mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport);
}

static int register_devlink_port(struct mlx5_core_dev *dev,
				 struct mlx5e_rep_priv *rpriv)
{
	struct devlink *devlink = priv_to_devlink(dev);
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct netdev_phys_item_id ppid = {};
	int ret;

	if (!is_devlink_port_supported(dev, rpriv))
		return 0;

	ret = mlx5e_rep_get_port_parent_id(rpriv->netdev, &ppid);
	if (ret)
		return ret;

	if (rep->vport == MLX5_VPORT_UPLINK)
		devlink_port_attrs_set(&rpriv->dl_port,
				       DEVLINK_PORT_FLAVOUR_PHYSICAL,
				       PCI_FUNC(dev->pdev->devfn), false, 0,
				       &ppid.id[0], ppid.id_len);
	else if (rep->vport == MLX5_VPORT_PF)
		devlink_port_attrs_pci_pf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn);
	else if (mlx5_eswitch_is_vf_vport(dev->priv.eswitch, rpriv->rep->vport))
		devlink_port_attrs_pci_vf_set(&rpriv->dl_port,
					      &ppid.id[0], ppid.id_len,
					      dev->pdev->devfn,
					      rep->vport - 1);

	return devlink_port_register(devlink, &rpriv->dl_port, rep->vport);
}

static void unregister_devlink_port(struct mlx5_core_dev *dev,
				    struct mlx5e_rep_priv *rpriv)
{
	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_unregister(&rpriv->dl_port);
}

/* e-Switch vport representors */

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
	const struct mlx5e_profile *profile;
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	int nch, err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	/* rpriv->rep to be looked up when profile->init() is called */
	rpriv->rep = rep;

	nch = mlx5e_get_max_num_channels(dev);
	profile = (rep->vport == MLX5_VPORT_UPLINK) ?
		  &mlx5e_uplink_rep_profile : &mlx5e_rep_profile;
	netdev = mlx5e_create_netdev(dev, profile, nch, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}

	rpriv->netdev = netdev;
	rep->rep_data[REP_ETH].priv = rpriv;
	INIT_LIST_HEAD(&rpriv->vport_sqs_list);

	if (rep->vport == MLX5_VPORT_UPLINK) {
		err = mlx5e_create_mdev_resources(dev);
		if (err)
			goto err_destroy_netdev;
	}

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_mdev_resources;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	err = register_devlink_port(dev, rpriv);
	if (err) {
		esw_warn(dev, "Failed to register devlink port %d\n",
			 rep->vport);
		goto err_neigh_cleanup;
	}

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_devlink_cleanup;
	}

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_eth_set(&rpriv->dl_port, netdev);
	return 0;

err_devlink_cleanup:
	unregister_devlink_port(dev, rpriv);

err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_mdev_resources:
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(dev);

err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
	struct net_device *netdev = rpriv->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *dev = priv->mdev;
	void *ppriv = priv->ppriv;

	if (is_devlink_port_supported(dev, rpriv))
		devlink_port_type_clear(&rpriv->dl_port);
	unregister_netdev(netdev);
	unregister_devlink_port(dev, rpriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	if (rep->vport == MLX5_VPORT_UPLINK)
		mlx5e_destroy_mdev_resources(priv->mdev);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;

	rpriv = mlx5e_rep_to_rep_priv(rep);

	return rpriv->netdev;
}

static const struct mlx5_eswitch_rep_ops rep_ops = {
	.load = mlx5e_vport_rep_load,
	.unload = mlx5e_vport_rep_unload,
	.get_proto_dev = mlx5e_vport_rep_get_proto_dev
};

void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_register_vport_reps(esw, &rep_ops, REP_ETH);
}

void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5_eswitch_unregister_vport_reps(esw, REP_ETH);
}