/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
				  struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
		sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
	{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

#define NUM_VPORT_REP_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
				  u32 stringset, uint8_t *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
			strcpy(data + (i * ETH_GSTRING_LEN),
			       sw_rep_stats_desc[i].format);
		break;
	}
}

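/* Stats helpers: hardware counters are read from the e-switch vport the
 * representor is attached to, software counters are aggregated from the
 * representor's own channels.
 */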
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct rtnl_link_stats64 *vport_stats;
	struct ifla_vf_stats vf_stats;
	int err;

	err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
	if (err) {
		pr_warn("vport %d error %d reading stats\n", rep->vport, err);
		return;
	}

	vport_stats = &priv->stats.vf_vport;
	/* flip tx/rx as we are reporting the counters for the switch vport */
	vport_stats->rx_packets = vf_stats.tx_packets;
	vport_stats->rx_bytes   = vf_stats.tx_bytes;
	vport_stats->tx_packets = vf_stats.rx_packets;
	vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats *s = &priv->stats.sw;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = &c->rq.stats;

		s->rx_packets += rq_stats->packets;
		s->rx_bytes   += rq_stats->bytes;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets += sq_stats->packets;
			s->tx_bytes   += sq_stats->bytes;
		}
	}
}

static void mlx5e_rep_update_stats(struct mlx5e_priv *priv)
{
	mlx5e_rep_update_sw_counters(priv);
	mlx5e_rep_update_hw_counters(priv);
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
					struct ethtool_stats *stats, u64 *data)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	int i;

	if (!data)
		return;

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_rep_update_sw_counters(priv);
	mutex_unlock(&priv->state_lock);

	for (i = 0; i < NUM_VPORT_REP_COUNTERS; i++)
		data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
					       sw_rep_stats_desc, i);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return NUM_VPORT_REP_COUNTERS;
	default:
		return -EOPNOTSUPP;
	}
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
	.get_drvinfo	   = mlx5e_rep_get_drvinfo,
	.get_link	   = ethtool_op_get_link,
	.get_strings	   = mlx5e_rep_get_strings,
	.get_sset_count	   = mlx5e_rep_get_sset_count,
	.get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw->mode == SRIOV_NONE)
		return -EOPNOTSUPP;

	switch (attr->id) {
	case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
		attr->u.ppid.id_len = ETH_ALEN;
		ether_addr_copy(attr->u.ppid.id, rep->hw_id);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}

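/* Install e-switch rules that forward traffic sent on the representor's
 * send queues to the vport it represents. Called when the rep is loaded
 * while its netdev is already open (see mlx5e_nic_rep_load below).
 */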
int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5e_channel *c;
	int n, tc, num_sqs = 0;
	int err = -ENOMEM;
	u16 *sqs;

	sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(u16), GFP_KERNEL);
	if (!sqs)
		goto out;

	for (n = 0; n < priv->channels.num; n++) {
		c = priv->channels.c[n];
		for (tc = 0; tc < c->num_tc; tc++)
			sqs[num_sqs++] = c->sq[tc].sqn;
	}

	err = mlx5_eswitch_sqs2vport_start(esw, rep, sqs, num_sqs);
	kfree(sqs);

out:
	if (err)
		netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
	return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5_eswitch_sqs2vport_stop(esw, rep);
}

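/* Bound the flow-counter sampling interval by the shortest DELAY_PROBE_TIME
 * of the ARP/ND tables, so neighbour 'used' feedback from the counters
 * reaches the kernel neighbour state machine in time.
 */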
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned long ipv6_interval = NEIGH_VAR(&ipv6_stub->nd_tbl->parms,
						DELAY_PROBE_TIME);
#else
	unsigned long ipv6_interval = ~0UL;
#endif
	unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
						DELAY_PROBE_TIME);
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);

	rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
	mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	mlx5_fc_queue_stats_work(priv->mdev,
				 &neigh_update->neigh_stats_work,
				 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
	struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
						    neigh_update.neigh_stats_work.work);
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe;

	rtnl_lock();
	if (!list_empty(&rpriv->neigh_update.neigh_list))
		mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
		mlx5e_tc_update_neigh_used_value(nhe);

	rtnl_unlock();
}

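/* Reference counting for neigh hash entries: references are taken at
 * creation and per netevent notification, and the entry is freed only
 * when the last reference is dropped.
 */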
static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
	refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
	if (refcount_dec_and_test(&nhe->refcnt))
		kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
				   struct mlx5e_encap_entry *e,
				   bool neigh_connected,
				   unsigned char ha[ETH_ALEN])
{
	struct ethhdr *eth = (struct ethhdr *)e->encap_header;

	ASSERT_RTNL();

	if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
	    !ether_addr_equal(e->h_dest, ha))
		mlx5e_tc_encap_flows_del(priv, e);

	if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
		ether_addr_copy(e->h_dest, ha);
		ether_addr_copy(eth->h_dest, ha);

		mlx5e_tc_encap_flows_add(priv, e);
	}
}

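/* Work handler scheduled from the netevent notifier. Runs in process
 * context, so it can take the RTNL lock while it updates the offloaded
 * encap flows of every encap entry attached to this neigh entry.
 */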
static void mlx5e_rep_neigh_update(struct work_struct *work)
{
	struct mlx5e_neigh_hash_entry *nhe =
		container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
	struct neighbour *n = nhe->n;
	struct mlx5e_encap_entry *e;
	unsigned char ha[ETH_ALEN];
	struct mlx5e_priv *priv;
	bool neigh_connected;
	bool encap_connected;
	u8 nud_state, dead;

	rtnl_lock();

	/* If these parameters are changed after we release the lock,
	 * we'll receive another event letting us know about it.
	 * We use this lock to avoid inconsistency between the neigh validity
	 * and its hw address.
	 */
	read_lock_bh(&n->lock);
	memcpy(ha, n->ha, ETH_ALEN);
	nud_state = n->nud_state;
	dead = n->dead;
	read_unlock_bh(&n->lock);

	neigh_connected = (nud_state & NUD_VALID) && !dead;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
		priv = netdev_priv(e->out_dev);

		if (encap_connected != neigh_connected ||
		    !ether_addr_equal(e->h_dest, ha))
			mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
	}
	mlx5e_rep_neigh_entry_release(nhe);
	rtnl_unlock();
	neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh);

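/* Netevent notifier callback. Runs in atomic context, hence the
 * spin_lock_bh usage below and the deferral of the actual flow updates
 * to the mlx5e workqueue.
 */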
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
				    unsigned long event, void *ptr)
{
	struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
						    neigh_update.netevent_nb);
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct net_device *netdev = rpriv->rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_neigh_hash_entry *nhe = NULL;
	struct mlx5e_neigh m_neigh = {};
	struct neigh_parms *p;
	struct neighbour *n;
	bool found = false;

	switch (event) {
	case NETEVENT_NEIGH_UPDATE:
		n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
		if (n->tbl != ipv6_stub->nd_tbl && n->tbl != &arp_tbl)
#else
		if (n->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		m_neigh.dev = n->dev;
		m_neigh.family = n->ops->family;
		memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

		/* We are in atomic context and can't take RTNL mutex, so use
		 * spin_lock_bh to lookup the neigh table. bh is used since
		 * netevent can be called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
		if (!nhe) {
			spin_unlock_bh(&neigh_update->encap_lock);
			return NOTIFY_DONE;
		}

		/* This assignment is valid as long as the neigh reference
		 * is taken
		 */
		nhe->n = n;

		/* Take a reference to ensure the neighbour and mlx5 encap
		 * entry won't be destructed until we drop the reference in
		 * delayed work.
		 */
		neigh_hold(n);
		mlx5e_rep_neigh_entry_hold(nhe);

		if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
			mlx5e_rep_neigh_entry_release(nhe);
			neigh_release(n);
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		break;

	case NETEVENT_DELAY_PROBE_TIME_UPDATE:
		p = ptr;

		/* We check the device is present since we don't care about
		 * changes in the default table, we only care about changes
		 * done per device delay probe time parameter.
		 */
#if IS_ENABLED(CONFIG_IPV6)
		if (!p->dev || (p->tbl != ipv6_stub->nd_tbl && p->tbl != &arp_tbl))
#else
		if (!p->dev || p->tbl != &arp_tbl)
#endif
			return NOTIFY_DONE;

		/* We are in atomic context and can't take RTNL mutex,
		 * so use spin_lock_bh to walk the neigh list and look for
		 * the relevant device. bh is used since netevent can be
		 * called from a softirq context.
		 */
		spin_lock_bh(&neigh_update->encap_lock);
		list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
			if (p->dev == nhe->m_neigh.dev) {
				found = true;
				break;
			}
		}
		spin_unlock_bh(&neigh_update->encap_lock);
		if (!found)
			return NOTIFY_DONE;

		neigh_update->min_interval = min_t(unsigned long,
						   NEIGH_VAR(p, DELAY_PROBE_TIME),
						   neigh_update->min_interval);
		mlx5_fc_update_sampling_interval(priv->mdev,
						 neigh_update->min_interval);
		break;
	}
	return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
	.head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
	.key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
	.key_len = sizeof(struct mlx5e_neigh),
	.automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	int err;

	err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
	if (err)
		return err;

	INIT_LIST_HEAD(&neigh_update->neigh_list);
	spin_lock_init(&neigh_update->encap_lock);
	INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
			  mlx5e_rep_neigh_stats_work);
	mlx5e_rep_neigh_update_init_interval(rpriv);

	rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
	err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
	if (err)
		goto out_err;
	return 0;

out_err:
	rhashtable_destroy(&neigh_update->neigh_ht);
	return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
	struct mlx5e_priv *priv = netdev_priv(rpriv->rep->netdev);

	unregister_netevent_notifier(&neigh_update->netevent_nb);

	flush_workqueue(priv->wq); /* flush neigh update works */

	cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

	rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
					struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
				     &nhe->rhash_node,
				     mlx5e_neigh_ht_params);
	if (err)
		return err;

	list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

	return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
					 struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	spin_lock_bh(&rpriv->neigh_update.encap_lock);

	list_del(&nhe->neigh_list);

	rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
			       &nhe->rhash_node,
			       mlx5e_neigh_ht_params);
	spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
			     struct mlx5e_neigh *m_neigh)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

	return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
				      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
					struct mlx5e_encap_entry *e,
					struct mlx5e_neigh_hash_entry **nhe)
{
	int err;

	*nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
	if (!*nhe)
		return -ENOMEM;

	memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
	INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
	INIT_LIST_HEAD(&(*nhe)->encap_list);
	refcount_set(&(*nhe)->refcnt, 1);

	err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
	if (err)
		goto out_free;
	return 0;

out_free:
	kfree(*nhe);
	return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
					  struct mlx5e_neigh_hash_entry *nhe)
{
	/* The neigh hash entry must be removed from the hash table regardless
	 * of the reference count value, so it won't be found by the next
	 * neigh notification call. The neigh hash entry reference count is
	 * incremented only during creation and neigh notification calls and
	 * protects from freeing the nhe struct.
	 */
	mlx5e_rep_neigh_entry_remove(priv, nhe);
	mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
				 struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;
	int err;

	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
	if (!nhe) {
		err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
		if (err)
			return err;
	}
	list_add(&e->encap_list, &nhe->encap_list);
	return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
				  struct mlx5e_encap_entry *e)
{
	struct mlx5e_neigh_hash_entry *nhe;

	list_del(&e->encap_list);
	nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

	if (list_empty(&nhe->encap_list))
		mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

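/* Opening/closing the representor netdev also propagates the matching
 * admin state to the e-switch vport it represents.
 */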
static int mlx5e_rep_open(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(dev);
	if (err)
		goto unlock;

	if (!mlx5_eswitch_set_vport_state(esw, rep->vport,
					  MLX5_ESW_VPORT_ADMIN_STATE_UP))
		netif_carrier_on(dev);

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	mutex_lock(&priv->state_lock);
	(void)mlx5_eswitch_set_vport_state(esw, rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
	ret = mlx5e_close_locked(dev);
	mutex_unlock(&priv->state_lock);
	return ret;
}

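/* Report the port name as the VF index: vport N hosts VF N-1, hence the
 * rep->vport - 1 below.
 */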
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
					char *buf, size_t len)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	int ret;

	ret = snprintf(buf, len, "%d", rep->vport - 1);
	if (ret >= len)
		return -EOPNOTSUPP;

	return 0;
}

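/* Dispatch TC flower commands to the mlx5 TC offload layer. */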
static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
			      struct tc_cls_flower_offload *cls_flower)
{
	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
				 void *cb_priv)
{
	struct mlx5e_priv *priv = cb_priv;

	if (!tc_can_offload(priv->netdev))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc_block(struct net_device *dev,
				    struct tc_block_offload *f)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	switch (f->command) {
	case TC_BLOCK_BIND:
		return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
					     priv, priv);
	case TC_BLOCK_UNBIND:
		tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
			      void *type_data)
{
	switch (type) {
	case TC_SETUP_BLOCK:
		return mlx5e_rep_setup_tc_block(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;

	if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
		return false;

	rep = rpriv->rep;
	if (esw->mode == SRIOV_OFFLOADS &&
	    rep && rep->vport == FDB_UPLINK_VPORT)
		return true;

	return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	if (rep && rep->vport != FDB_UPLINK_VPORT)
		return true;

	return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
			return true;
	}

	return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
		     struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;

	stats->rx_packets = sstats->rx_packets;
	stats->rx_bytes   = sstats->rx_bytes;
	stats->tx_packets = sstats->tx_packets;
	stats->tx_bytes   = sstats->tx_bytes;

	stats->tx_dropped = sstats->tx_queue_dropped;

	return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
			    void *sp)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return mlx5e_get_sw_stats64(dev, sp);
	}

	return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};

static const struct net_device_ops mlx5e_netdev_ops_rep = {
	.ndo_open		= mlx5e_rep_open,
	.ndo_stop		= mlx5e_rep_close,
	.ndo_start_xmit		= mlx5e_xmit,
	.ndo_get_phys_port_name	= mlx5e_rep_get_phys_port_name,
	.ndo_setup_tc		= mlx5e_rep_setup_tc,
	.ndo_get_stats64	= mlx5e_rep_get_stats,
	.ndo_has_offload_stats	= mlx5e_has_offload_stats,
	.ndo_get_offload_stats	= mlx5e_get_offload_stats,
};

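/* Representor netdevs mainly carry e-switch miss (slow path) traffic,
 * so the minimum supported RQ/SQ sizes are used.
 */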
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
				   struct mlx5e_params *params)
{
	u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
					 MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
					 MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

	params->log_sq_size = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
	params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
	params->log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;

	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	params->num_tc        = 1;
	params->lro_wqe_sz    = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
	netdev->netdev_ops = &mlx5e_netdev_ops_rep;

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

#ifdef CONFIG_NET_SWITCHDEV
	netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;
#endif

	netdev->features    |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
	netdev->hw_features |= NETIF_F_HW_TC;

	eth_hw_addr_random(netdev);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev    = mdev;
	priv->netdev  = netdev;
	priv->profile = profile;
	priv->ppriv   = ppriv;

	mutex_init(&priv->state_lock);

	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

	priv->channels.params.num_channels = profile->max_nch(mdev);

	priv->hard_mtu = MLX5E_ETH_HARD_MTU;

	mlx5e_build_rep_params(mdev, &priv->channels.params);
	mlx5e_build_rep_netdev(netdev);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;
	struct mlx5_flow_handle *flow_rule;
	int err;

	mlx5e_init_l2_addr(priv);

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
						      rep->vport,
						      priv->direct_tir[0].tirn);
	if (IS_ERR(flow_rule)) {
		err = PTR_ERR(flow_rule);
		goto err_destroy_direct_tirs;
	}
	rep->vport_rx_rule = flow_rule;

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_del_flow_rule;

	return 0;

err_del_flow_rule:
	mlx5_del_flow_rules(rep->vport_rx_rule);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
	return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep = rpriv->rep;

	mlx5e_tc_cleanup(priv);
	mlx5_del_flow_rules(rep->vport_rx_rule);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}
	return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define	MLX5E_PORT_REPRESENTOR_NCH 1
	return MLX5E_PORT_REPRESENTOR_NCH;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
	.init			= mlx5e_init_rep,
	.init_rx		= mlx5e_init_rep_rx,
	.cleanup_rx		= mlx5e_cleanup_rep_rx,
	.init_tx		= mlx5e_init_rep_tx,
	.cleanup_tx		= mlx5e_cleanup_nic_tx,
	.update_stats		= mlx5e_rep_update_stats,
	.max_nch		= mlx5e_get_rep_max_num_channels,
	.update_carrier		= NULL,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
	.rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
	.max_tc			= 1,
};

/* e-Switch vport representors */

static int
mlx5e_nic_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	int err;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		err = mlx5e_add_sqs_fwd_rules(priv);
		if (err)
			return err;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err)
		goto err_remove_sqs;

	return 0;

err_remove_sqs:
	mlx5e_remove_sqs_fwd_rules(priv);
	return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_priv *priv = netdev_priv(rep->netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;

	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* clean (and re-init) existing uplink offloaded TC rules */
	mlx5e_tc_cleanup(priv);
	mlx5e_tc_init(priv);

	mlx5e_rep_neigh_cleanup(rpriv);
}

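/* Bring up a VF representor: create and attach a netdev, set up
 * neighbour tracking and the egress-device TC callback, then register
 * the netdev. The error path unwinds in reverse order.
 */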
static int
mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct mlx5e_rep_priv *rpriv;
	struct net_device *netdev;
	struct mlx5e_priv *upriv;
	int err;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return -ENOMEM;

	netdev = mlx5e_create_netdev(esw->dev, &mlx5e_rep_profile, rpriv);
	if (!netdev) {
		pr_warn("Failed to create representor netdev for vport %d\n",
			rep->vport);
		kfree(rpriv);
		return -EINVAL;
	}
	rep->netdev = netdev;
	rpriv->rep = rep;

	err = mlx5e_attach_netdev(netdev_priv(netdev));
	if (err) {
		pr_warn("Failed to attach representor netdev for vport %d\n",
			rep->vport);
		goto err_destroy_netdev;
	}

	err = mlx5e_rep_neigh_init(rpriv);
	if (err) {
		pr_warn("Failed to initialize neighbours handling for vport %d\n",
			rep->vport);
		goto err_detach_netdev;
	}

	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
	err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
					 upriv);
	if (err)
		goto err_neigh_cleanup;

	err = register_netdev(netdev);
	if (err) {
		pr_warn("Failed to register representor netdev for vport %d\n",
			rep->vport);
		goto err_egdev_cleanup;
	}

	return 0;

err_egdev_cleanup:
	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
				     upriv);
err_neigh_cleanup:
	mlx5e_rep_neigh_cleanup(rpriv);
err_detach_netdev:
	mlx5e_detach_netdev(netdev_priv(netdev));
err_destroy_netdev:
	mlx5e_destroy_netdev(netdev_priv(netdev));
	kfree(rpriv);
	return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
	struct net_device *netdev = rep->netdev;
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	void *ppriv = priv->ppriv;
	struct mlx5e_priv *upriv;

	unregister_netdev(rep->netdev);
	upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
	tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
				     upriv);
	mlx5e_rep_neigh_cleanup(rpriv);
	mlx5e_detach_netdev(priv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv); /* mlx5e_rep_priv */
}

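/* Register a representor for every VF vport; vport 0 (the uplink/PF
 * vport) is registered separately in mlx5e_register_vport_reps().
 */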
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;
	u8 mac[ETH_ALEN];

	mlx5_query_nic_vport_mac_address(mdev, 0, mac);

	for (vport = 1; vport < total_vfs; vport++) {
		struct mlx5_eswitch_rep rep;

		rep.load = mlx5e_vport_rep_load;
		rep.unload = mlx5e_vport_rep_unload;
		rep.vport = vport;
		ether_addr_copy(rep.hw_id, mac);
		mlx5_eswitch_register_vport_rep(esw, vport, &rep);
	}
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
	int vport;

	for (vport = 1; vport < total_vfs; vport++)
		mlx5_eswitch_unregister_vport_rep(esw, vport);
}

void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5_eswitch_rep rep;

	mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
	rep.load = mlx5e_nic_rep_load;
	rep.unload = mlx5e_nic_rep_unload;
	rep.vport = FDB_UPLINK_VPORT;
	rep.netdev = priv->netdev;
	mlx5_eswitch_register_vport_rep(esw, 0, &rep); /* UPLINK PF vport*/

	mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_eswitch *esw = mdev->priv.eswitch;

	mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
	mlx5_eswitch_unregister_vport_rep(esw, 0); /* UPLINK PF*/
}

void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv;

	rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
	if (!rpriv)
		return NULL;

	rpriv->rep = &esw->offloads.vport_reps[0];
	return rpriv;
}