/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <generated/utsrelease.h>
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>

#include "eswitch.h"
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "fs_core.h"

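/* Representor rings are kept small: the max() below only guards against
 * the device minimum, so both SQ and RQ default to 2^6 = 64 entries.
 */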
#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)
#define MLX5E_REP_PARAMS_LOG_RQ_SIZE \
        max(0x6, MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)

static const char mlx5e_rep_driver_name[] = "mlx5e_rep";

static void mlx5e_rep_get_drvinfo(struct net_device *dev,
                                  struct ethtool_drvinfo *drvinfo)
{
        strlcpy(drvinfo->driver, mlx5e_rep_driver_name,
                sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
}

static const struct counter_desc sw_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
        { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
};

static const struct counter_desc vport_rep_stats_desc[] = {
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_rx_bytes) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_packets) },
        { MLX5E_DECLARE_STAT(struct vport_stats, vport_tx_bytes) },
};

#define NUM_VPORT_REP_SW_COUNTERS ARRAY_SIZE(sw_rep_stats_desc)
#define NUM_VPORT_REP_HW_COUNTERS ARRAY_SIZE(vport_rep_stats_desc)

static void mlx5e_rep_get_strings(struct net_device *dev,
                                  u32 stringset, uint8_t *data)
{
        int i, j;

        switch (stringset) {
        case ETH_SS_STATS:
                for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               sw_rep_stats_desc[i].format);
                for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                        strcpy(data + (i * ETH_GSTRING_LEN),
                               vport_rep_stats_desc[j].format);
                break;
        }
}

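/* HW counters are read from the e-switch vport context. They are
 * reported from the switch side of the vport, hence the tx/rx flip
 * below.
 */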
static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct rtnl_link_stats64 *vport_stats;
        struct ifla_vf_stats vf_stats;
        int err;

        err = mlx5_eswitch_get_vport_stats(esw, rep->vport, &vf_stats);
        if (err) {
                pr_warn("vport %d error %d reading stats\n", rep->vport, err);
                return;
        }

        vport_stats = &priv->stats.vf_vport;
        /* flip tx/rx as we are reporting the counters for the switch vport */
        vport_stats->rx_packets = vf_stats.tx_packets;
        vport_stats->rx_bytes   = vf_stats.tx_bytes;
        vport_stats->tx_packets = vf_stats.rx_packets;
        vport_stats->tx_bytes   = vf_stats.rx_bytes;
}

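/* SW counters aggregate the per-channel RQ/SQ counters maintained by
 * the representor datapath.
 */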
static void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        int i, j;

        read_lock(&priv->stats_lock);
        if (!priv->channels_active)
                goto out;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->channels.num; i++) {
                struct mlx5e_channel *c = priv->channels.c[i];

                rq_stats = &c->rq.stats;

                s->rx_packets += rq_stats->packets;
                s->rx_bytes   += rq_stats->bytes;

                for (j = 0; j < priv->channels.params.num_tc; j++) {
                        sq_stats = &c->sq[j].stats;

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes   += sq_stats->bytes;
                }
        }

out:
        read_unlock(&priv->stats_lock);
}

static void mlx5e_rep_get_ethtool_stats(struct net_device *dev,
                                        struct ethtool_stats *stats, u64 *data)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        int i, j;

        if (!data)
                return;

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_rep_update_sw_counters(priv);
        mlx5e_rep_update_hw_counters(priv);
        mutex_unlock(&priv->state_lock);

        for (i = 0; i < NUM_VPORT_REP_SW_COUNTERS; i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
                                               sw_rep_stats_desc, i);

        for (j = 0; j < NUM_VPORT_REP_HW_COUNTERS; j++, i++)
                data[i] = MLX5E_READ_CTR64_CPU(&priv->stats.vf_vport,
                                               vport_rep_stats_desc, j);
}

static int mlx5e_rep_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return NUM_VPORT_REP_SW_COUNTERS + NUM_VPORT_REP_HW_COUNTERS;
        default:
                return -EOPNOTSUPP;
        }
}

static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
        .get_drvinfo       = mlx5e_rep_get_drvinfo,
        .get_link          = ethtool_op_get_link,
        .get_strings       = mlx5e_rep_get_strings,
        .get_sset_count    = mlx5e_rep_get_sset_count,
        .get_ethtool_stats = mlx5e_rep_get_ethtool_stats,
};

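/* All representors of an e-switch report the same parent switch ID
 * (the rep's hw_id), which lets user space group them with their
 * physical port.
 */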
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw->mode == SRIOV_NONE)
                return -EOPNOTSUPP;

        switch (attr->id) {
        case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
                attr->u.ppid.id_len = ETH_ALEN;
                ether_addr_copy(attr->u.ppid.id, rep->hw_id);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

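/* Packets transmitted on a representor are re-injected into the
 * e-switch. The "send to vport" rules below match on the representor's
 * SQ numbers and steer such packets to the vport the representor
 * stands for.
 */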
static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_sq *rep_sq, *tmp;
        struct mlx5e_rep_priv *rpriv;

        if (esw->mode != SRIOV_OFFLOADS)
                return;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) {
                mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule);
                list_del(&rep_sq->list);
                kfree(rep_sq);
        }
}

static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw,
                                 struct mlx5_eswitch_rep *rep,
                                 u32 *sqns_array, int sqns_num)
{
        struct mlx5_flow_handle *flow_rule;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_rep_sq *rep_sq;
        int err;
        int i;

        if (esw->mode != SRIOV_OFFLOADS)
                return 0;

        rpriv = mlx5e_rep_to_rep_priv(rep);
        for (i = 0; i < sqns_num; i++) {
                rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL);
                if (!rep_sq) {
                        err = -ENOMEM;
                        goto out_err;
                }

                /* Add re-inject rule to the PF/representor sqs */
                flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw,
                                                                rep->vport,
                                                                sqns_array[i]);
                if (IS_ERR(flow_rule)) {
                        err = PTR_ERR(flow_rule);
                        kfree(rep_sq);
                        goto out_err;
                }
                rep_sq->send_to_vport_rule = flow_rule;
                list_add(&rep_sq->list, &rpriv->vport_sqs_list);
        }
        return 0;

out_err:
        mlx5e_sqs2vport_stop(esw, rep);
        return err;
}

int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5e_channel *c;
        int n, tc, num_sqs = 0;
        int err = -ENOMEM;
        u32 *sqs;

        sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL);
        if (!sqs)
                goto out;

        for (n = 0; n < priv->channels.num; n++) {
                c = priv->channels.c[n];
                for (tc = 0; tc < c->num_tc; tc++)
                        sqs[num_sqs++] = c->sq[tc].sqn;
        }

        err = mlx5e_sqs2vport_start(esw, rep, sqs, num_sqs);
        kfree(sqs);

out:
        if (err)
                netdev_warn(priv->netdev, "Failed to add SQs FWD rules %d\n", err);
        return err;
}

void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        mlx5e_sqs2vport_stop(esw, rep);
}

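/* Neighbour tracking: encap (tunnel) offload rules embed the
 * neighbour's MAC address, so the driver listens for kernel neigh
 * events and updates the offloaded flows when a neighbour changes
 * state or address.
 */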
static void mlx5e_rep_neigh_update_init_interval(struct mlx5e_rep_priv *rpriv)
{
#if IS_ENABLED(CONFIG_IPV6)
        unsigned long ipv6_interval = NEIGH_VAR(&nd_tbl.parms,
                                                DELAY_PROBE_TIME);
#else
        unsigned long ipv6_interval = ~0UL;
#endif
        unsigned long ipv4_interval = NEIGH_VAR(&arp_tbl.parms,
                                                DELAY_PROBE_TIME);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);

        rpriv->neigh_update.min_interval = min_t(unsigned long, ipv6_interval, ipv4_interval);
        mlx5_fc_update_sampling_interval(priv->mdev, rpriv->neigh_update.min_interval);
}

void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        mlx5_fc_queue_stats_work(priv->mdev,
                                 &neigh_update->neigh_stats_work,
                                 neigh_update->min_interval);
}

static void mlx5e_rep_neigh_stats_work(struct work_struct *work)
{
        struct mlx5e_rep_priv *rpriv = container_of(work, struct mlx5e_rep_priv,
                                                    neigh_update.neigh_stats_work.work);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe;

        rtnl_lock();
        if (!list_empty(&rpriv->neigh_update.neigh_list))
                mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(nhe, &rpriv->neigh_update.neigh_list, neigh_list)
                mlx5e_tc_update_neigh_used_value(nhe);

        rtnl_unlock();
}

static void mlx5e_rep_neigh_entry_hold(struct mlx5e_neigh_hash_entry *nhe)
{
        refcount_inc(&nhe->refcnt);
}

static void mlx5e_rep_neigh_entry_release(struct mlx5e_neigh_hash_entry *nhe)
{
        if (refcount_dec_and_test(&nhe->refcnt))
                kfree(nhe);
}

static void mlx5e_rep_update_flows(struct mlx5e_priv *priv,
                                   struct mlx5e_encap_entry *e,
                                   bool neigh_connected,
                                   unsigned char ha[ETH_ALEN])
{
        struct ethhdr *eth = (struct ethhdr *)e->encap_header;

        ASSERT_RTNL();

        if ((!neigh_connected && (e->flags & MLX5_ENCAP_ENTRY_VALID)) ||
            !ether_addr_equal(e->h_dest, ha))
                mlx5e_tc_encap_flows_del(priv, e);

        if (neigh_connected && !(e->flags & MLX5_ENCAP_ENTRY_VALID)) {
                ether_addr_copy(e->h_dest, ha);
                ether_addr_copy(eth->h_dest, ha);

                mlx5e_tc_encap_flows_add(priv, e);
        }
}

static void mlx5e_rep_neigh_update(struct work_struct *work)
{
        struct mlx5e_neigh_hash_entry *nhe =
                container_of(work, struct mlx5e_neigh_hash_entry, neigh_update_work);
        struct neighbour *n = nhe->n;
        struct mlx5e_encap_entry *e;
        unsigned char ha[ETH_ALEN];
        struct mlx5e_priv *priv;
        bool neigh_connected;
        bool encap_connected;
        u8 nud_state, dead;

        rtnl_lock();

        /* If these parameters are changed after we release the lock,
         * we'll receive another event letting us know about it.
         * We use this lock to avoid inconsistency between the neigh validity
         * and its hw address.
         */
        read_lock_bh(&n->lock);
        memcpy(ha, n->ha, ETH_ALEN);
        nud_state = n->nud_state;
        dead = n->dead;
        read_unlock_bh(&n->lock);

        neigh_connected = (nud_state & NUD_VALID) && !dead;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                encap_connected = !!(e->flags & MLX5_ENCAP_ENTRY_VALID);
                priv = netdev_priv(e->out_dev);

                if (encap_connected != neigh_connected ||
                    !ether_addr_equal(e->h_dest, ha))
                        mlx5e_rep_update_flows(priv, e, neigh_connected, ha);
        }
        mlx5e_rep_neigh_entry_release(nhe);
        rtnl_unlock();
        neigh_release(n);
}

static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh);

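/* Runs in atomic (possibly softirq) context; anything that needs RTNL
 * is deferred to priv->wq via the nhe's neigh_update_work.
 */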
static int mlx5e_rep_netevent_event(struct notifier_block *nb,
                                    unsigned long event, void *ptr)
{
        struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv,
                                                    neigh_update.netevent_nb);
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_neigh_hash_entry *nhe = NULL;
        struct mlx5e_neigh m_neigh = {};
        struct neigh_parms *p;
        struct neighbour *n;
        bool found = false;

        switch (event) {
        case NETEVENT_NEIGH_UPDATE:
                n = ptr;
#if IS_ENABLED(CONFIG_IPV6)
                if (n->tbl != &nd_tbl && n->tbl != &arp_tbl)
#else
                if (n->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                m_neigh.dev = n->dev;
                m_neigh.family = n->ops->family;
                memcpy(&m_neigh.dst_ip, n->primary_key, n->tbl->key_len);

                /* We are in atomic context and can't take RTNL mutex, so use
                 * spin_lock_bh to lookup the neigh table. bh is used since
                 * netevent can be called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                nhe = mlx5e_rep_neigh_entry_lookup(priv, &m_neigh);
                if (!nhe) {
                        spin_unlock_bh(&neigh_update->encap_lock);
                        return NOTIFY_DONE;
                }

                /* This assignment is valid as long as the neigh reference
                 * is taken
                 */
                nhe->n = n;

                /* Take a reference to ensure the neighbour and mlx5 encap
                 * entry won't be destructed until we drop the reference in
                 * delayed work.
                 */
                neigh_hold(n);
                mlx5e_rep_neigh_entry_hold(nhe);

                if (!queue_work(priv->wq, &nhe->neigh_update_work)) {
                        mlx5e_rep_neigh_entry_release(nhe);
                        neigh_release(n);
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                break;

        case NETEVENT_DELAY_PROBE_TIME_UPDATE:
                p = ptr;

                /* We check the device is present since we don't care about
                 * changes in the default table, we only care about changes
                 * done per device delay probe time parameter.
                 */
#if IS_ENABLED(CONFIG_IPV6)
                if (!p->dev || (p->tbl != &nd_tbl && p->tbl != &arp_tbl))
#else
                if (!p->dev || p->tbl != &arp_tbl)
#endif
                        return NOTIFY_DONE;

                /* We are in atomic context and can't take RTNL mutex,
                 * so use spin_lock_bh to walk the neigh list and look for
                 * the relevant device. bh is used since netevent can be
                 * called from a softirq context.
                 */
                spin_lock_bh(&neigh_update->encap_lock);
                list_for_each_entry(nhe, &neigh_update->neigh_list, neigh_list) {
                        if (p->dev == nhe->m_neigh.dev) {
                                found = true;
                                break;
                        }
                }
                spin_unlock_bh(&neigh_update->encap_lock);
                if (!found)
                        return NOTIFY_DONE;

                neigh_update->min_interval = min_t(unsigned long,
                                                   NEIGH_VAR(p, DELAY_PROBE_TIME),
                                                   neigh_update->min_interval);
                mlx5_fc_update_sampling_interval(priv->mdev,
                                                 neigh_update->min_interval);
                break;
        }
        return NOTIFY_DONE;
}

static const struct rhashtable_params mlx5e_neigh_ht_params = {
        .head_offset = offsetof(struct mlx5e_neigh_hash_entry, rhash_node),
        .key_offset = offsetof(struct mlx5e_neigh_hash_entry, m_neigh),
        .key_len = sizeof(struct mlx5e_neigh),
        .automatic_shrinking = true,
};

static int mlx5e_rep_neigh_init(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        int err;

        err = rhashtable_init(&neigh_update->neigh_ht, &mlx5e_neigh_ht_params);
        if (err)
                return err;

        INIT_LIST_HEAD(&neigh_update->neigh_list);
        spin_lock_init(&neigh_update->encap_lock);
        INIT_DELAYED_WORK(&neigh_update->neigh_stats_work,
                          mlx5e_rep_neigh_stats_work);
        mlx5e_rep_neigh_update_init_interval(rpriv);

        rpriv->neigh_update.netevent_nb.notifier_call = mlx5e_rep_netevent_event;
        err = register_netevent_notifier(&rpriv->neigh_update.netevent_nb);
        if (err)
                goto out_err;
        return 0;

out_err:
        rhashtable_destroy(&neigh_update->neigh_ht);
        return err;
}

static void mlx5e_rep_neigh_cleanup(struct mlx5e_rep_priv *rpriv)
{
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        unregister_netevent_notifier(&neigh_update->netevent_nb);

        flush_workqueue(priv->wq); /* flush neigh update works */

        cancel_delayed_work_sync(&rpriv->neigh_update.neigh_stats_work);

        rhashtable_destroy(&neigh_update->neigh_ht);
}

static int mlx5e_rep_neigh_entry_insert(struct mlx5e_priv *priv,
                                        struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        int err;

        err = rhashtable_insert_fast(&rpriv->neigh_update.neigh_ht,
                                     &nhe->rhash_node,
                                     mlx5e_neigh_ht_params);
        if (err)
                return err;

        list_add(&nhe->neigh_list, &rpriv->neigh_update.neigh_list);

        return err;
}

static void mlx5e_rep_neigh_entry_remove(struct mlx5e_priv *priv,
                                         struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        spin_lock_bh(&rpriv->neigh_update.encap_lock);

        list_del(&nhe->neigh_list);

        rhashtable_remove_fast(&rpriv->neigh_update.neigh_ht,
                               &nhe->rhash_node,
                               mlx5e_neigh_ht_params);
        spin_unlock_bh(&rpriv->neigh_update.encap_lock);
}

/* This function must only be called under RTNL lock or under the
 * representor's encap_lock in case RTNL mutex can't be held.
 */
static struct mlx5e_neigh_hash_entry *
mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv,
                             struct mlx5e_neigh *m_neigh)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5e_neigh_update_table *neigh_update = &rpriv->neigh_update;

        return rhashtable_lookup_fast(&neigh_update->neigh_ht, m_neigh,
                                      mlx5e_neigh_ht_params);
}

static int mlx5e_rep_neigh_entry_create(struct mlx5e_priv *priv,
                                        struct mlx5e_encap_entry *e,
                                        struct mlx5e_neigh_hash_entry **nhe)
{
        int err;

        *nhe = kzalloc(sizeof(**nhe), GFP_KERNEL);
        if (!*nhe)
                return -ENOMEM;

        memcpy(&(*nhe)->m_neigh, &e->m_neigh, sizeof(e->m_neigh));
        INIT_WORK(&(*nhe)->neigh_update_work, mlx5e_rep_neigh_update);
        INIT_LIST_HEAD(&(*nhe)->encap_list);
        refcount_set(&(*nhe)->refcnt, 1);

        err = mlx5e_rep_neigh_entry_insert(priv, *nhe);
        if (err)
                goto out_free;
        return 0;

out_free:
        kfree(*nhe);
        return err;
}

static void mlx5e_rep_neigh_entry_destroy(struct mlx5e_priv *priv,
                                          struct mlx5e_neigh_hash_entry *nhe)
{
        /* The neigh hash entry must be removed from the hash table regardless
         * of the reference count value, so it won't be found by the next
         * neigh notification call. The neigh hash entry reference count is
         * incremented only during creation and neigh notification calls and
         * protects from freeing the nhe struct.
         */
        mlx5e_rep_neigh_entry_remove(priv, nhe);
        mlx5e_rep_neigh_entry_release(nhe);
}

int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv,
                                 struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;
        int err;

        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);
        if (!nhe) {
                err = mlx5e_rep_neigh_entry_create(priv, e, &nhe);
                if (err)
                        return err;
        }
        list_add(&e->encap_list, &nhe->encap_list);
        return 0;
}

void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv,
                                  struct mlx5e_encap_entry *e)
{
        struct mlx5e_neigh_hash_entry *nhe;

        list_del(&e->encap_list);
        nhe = mlx5e_rep_neigh_entry_lookup(priv, &e->m_neigh);

        if (list_empty(&nhe->encap_list))
                mlx5e_rep_neigh_entry_destroy(priv, nhe);
}

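/* The representor's link state follows the e-switch vport admin state:
 * open/close also moves the vport up/down.
 */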
static int mlx5e_rep_open(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int err;

        mutex_lock(&priv->state_lock);
        err = mlx5e_open_locked(dev);
        if (err)
                goto unlock;

        if (!mlx5_modify_vport_admin_state(priv->mdev,
                                           MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                           rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_UP))
                netif_carrier_on(dev);

unlock:
        mutex_unlock(&priv->state_lock);
        return err;
}

static int mlx5e_rep_close(struct net_device *dev)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        mutex_lock(&priv->state_lock);
        mlx5_modify_vport_admin_state(priv->mdev,
                                      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
                                      rep->vport, MLX5_ESW_VPORT_ADMIN_STATE_DOWN);
        ret = mlx5e_close_locked(dev);
        mutex_unlock(&priv->state_lock);
        return ret;
}

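/* Vport 0 is the uplink; VF representor N fronts vport N + 1, hence
 * the "vport - 1" below.
 */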
static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
                                        char *buf, size_t len)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        int ret;

        ret = snprintf(buf, len, "%d", rep->vport - 1);
        if (ret >= len)
                return -EOPNOTSUPP;

        return 0;
}

static int
mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *cls_flower, int flags)
{
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return mlx5e_configure_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_DESTROY:
                return mlx5e_delete_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_STATS:
                return mlx5e_stats_flower(priv, cls_flower, flags);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data,
                                       void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
                                 void *cb_priv)
{
        struct mlx5e_priv *priv = cb_priv;

        if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc_block(struct net_device *dev,
                                    struct tc_block_offload *f)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
                                             priv, priv);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}

static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
                              void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return mlx5e_rep_setup_tc_block(dev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}

bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;

        if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
                return false;

        rep = rpriv->rep;
        if (esw->mode == SRIOV_OFFLOADS &&
            rep && rep->vport == FDB_UPLINK_VPORT)
                return true;

        return false;
}

static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;

        if (rep && rep->vport != FDB_UPLINK_VPORT)
                return true;

        return false;
}

bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv))
                        return true;
        }

        return false;
}

static int
mlx5e_get_sw_stats64(const struct net_device *dev,
                     struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5e_sw_stats *sstats = &priv->stats.sw;

        mlx5e_rep_update_sw_counters(priv);

        stats->rx_packets = sstats->rx_packets;
        stats->rx_bytes   = sstats->rx_bytes;
        stats->tx_packets = sstats->tx_packets;
        stats->tx_bytes   = sstats->tx_bytes;

        stats->tx_dropped = sstats->tx_queue_dropped;

        return 0;
}

int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev,
                            void *sp)
{
        switch (attr_id) {
        case IFLA_OFFLOAD_XSTATS_CPU_HIT:
                return mlx5e_get_sw_stats64(dev, sp);
        }

        return -EINVAL;
}

static void
mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
        struct mlx5e_priv *priv = netdev_priv(dev);

        memcpy(stats, &priv->stats.vf_vport, sizeof(*stats));
}

static const struct switchdev_ops mlx5e_rep_switchdev_ops = {
        .switchdev_port_attr_get = mlx5e_attr_get,
};

static const struct net_device_ops mlx5e_netdev_ops_rep = {
        .ndo_open                = mlx5e_rep_open,
        .ndo_stop                = mlx5e_rep_close,
        .ndo_start_xmit          = mlx5e_xmit,
        .ndo_get_phys_port_name  = mlx5e_rep_get_phys_port_name,
        .ndo_setup_tc            = mlx5e_rep_setup_tc,
        .ndo_get_stats64         = mlx5e_rep_get_stats,
        .ndo_has_offload_stats   = mlx5e_has_offload_stats,
        .ndo_get_offload_stats   = mlx5e_get_offload_stats,
};

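/* Representor channel parameters: small rings, a plain linked-list RQ
 * (no striding RQ) and CQE-based moderation when the device supports it.
 */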
static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev,
                                   struct mlx5e_params *params, u16 mtu)
{
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                                         MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                                         MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        params->hard_mtu    = MLX5E_ETH_HARD_MTU;
        params->sw_mtu      = mtu;
        params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE;
        params->rq_wq_type  = MLX5_WQ_TYPE_LINKED_LIST;
        params->log_rq_mtu_frames = MLX5E_REP_PARAMS_LOG_RQ_SIZE;

        params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

        params->num_tc     = 1;
        params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
}

static void mlx5e_build_rep_netdev(struct net_device *netdev)
{
        netdev->netdev_ops = &mlx5e_netdev_ops_rep;

        netdev->watchdog_timeo = 15 * HZ;

        netdev->ethtool_ops = &mlx5e_rep_ethtool_ops;

        netdev->switchdev_ops = &mlx5e_rep_switchdev_ops;

        netdev->features    |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL;
        netdev->hw_features |= NETIF_F_HW_TC;

        eth_hw_addr_random(netdev);
}

static void mlx5e_init_rep(struct mlx5_core_dev *mdev,
                           struct net_device *netdev,
                           const struct mlx5e_profile *profile,
                           void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        priv->mdev    = mdev;
        priv->netdev  = netdev;
        priv->profile = profile;
        priv->ppriv   = ppriv;

        mutex_init(&priv->state_lock);

        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);

        priv->channels.params.num_channels = profile->max_nch(mdev);

        mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu);
        mlx5e_build_rep_netdev(netdev);

        mlx5e_timestamp_init(priv);
}

static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep = rpriv->rep;
        struct mlx5_flow_handle *flow_rule;
        int err;

        mlx5e_init_l2_addr(priv);

        err = mlx5e_create_direct_rqts(priv);
        if (err)
                return err;

        err = mlx5e_create_direct_tirs(priv);
        if (err)
                goto err_destroy_direct_rqts;

        flow_rule = mlx5_eswitch_create_vport_rx_rule(esw,
                                                      rep->vport,
                                                      priv->direct_tir[0].tirn);
        if (IS_ERR(flow_rule)) {
                err = PTR_ERR(flow_rule);
                goto err_destroy_direct_tirs;
        }
        rpriv->vport_rx_rule = flow_rule;

        return 0;

err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_direct_rqts:
        mlx5e_destroy_direct_rqts(priv);
        return err;
}

static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
        struct mlx5e_rep_priv *rpriv = priv->ppriv;

        mlx5_del_flow_rules(rpriv->vport_rx_rule);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_direct_rqts(priv);
}

static int mlx5e_init_rep_tx(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }
        return 0;
}

static int mlx5e_get_rep_max_num_channels(struct mlx5_core_dev *mdev)
{
#define MLX5E_PORT_REPRESENTOR_NCH 1
        return MLX5E_PORT_REPRESENTOR_NCH;
}

static const struct mlx5e_profile mlx5e_rep_profile = {
        .init                   = mlx5e_init_rep,
        .init_rx                = mlx5e_init_rep_rx,
        .cleanup_rx             = mlx5e_cleanup_rep_rx,
        .init_tx                = mlx5e_init_rep_tx,
        .cleanup_tx             = mlx5e_cleanup_nic_tx,
        .update_stats           = mlx5e_rep_update_hw_counters,
        .max_nch                = mlx5e_get_rep_max_num_channels,
        .update_carrier         = NULL,
        .rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
        .rx_handlers.handle_rx_cqe_mpwqe = NULL /* Not supported */,
        .max_tc                 = 1,
};

/* e-Switch vport representors */

static int
mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);
        int err;

        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                err = mlx5e_add_sqs_fwd_rules(priv);
                if (err)
                        return err;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err)
                goto err_remove_sqs;

        /* init shared tc flow table */
        err = mlx5e_tc_esw_init(&rpriv->tc_ht);
        if (err)
                goto err_neigh_cleanup;

        return 0;

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);
err_remove_sqs:
        mlx5e_remove_sqs_fwd_rules(priv);
        return err;
}

static void
mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct mlx5e_priv *priv = netdev_priv(rpriv->netdev);

        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_remove_sqs_fwd_rules(priv);

        /* clean uplink offloaded TC rules, delete shared tc flow table */
        mlx5e_tc_esw_cleanup(&rpriv->tc_ht);

        mlx5e_rep_neigh_cleanup(rpriv);
}

static int
mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *uplink_rpriv;
        struct mlx5e_rep_priv *rpriv;
        struct net_device *netdev;
        struct mlx5e_priv *upriv;
        int err;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return -ENOMEM;

        netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, rpriv);
        if (!netdev) {
                pr_warn("Failed to create representor netdev for vport %d\n",
                        rep->vport);
                kfree(rpriv);
                return -EINVAL;
        }

        rpriv->netdev = netdev;
        rpriv->rep = rep;
        rep->rep_if[REP_ETH].priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);

        err = mlx5e_attach_netdev(netdev_priv(netdev));
        if (err) {
                pr_warn("Failed to attach representor netdev for vport %d\n",
                        rep->vport);
                goto err_destroy_netdev;
        }

        err = mlx5e_rep_neigh_init(rpriv);
        if (err) {
                pr_warn("Failed to initialize neighbours handling for vport %d\n",
                        rep->vport);
                goto err_detach_netdev;
        }

        uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH);
        upriv = netdev_priv(uplink_rpriv->netdev);
        err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                         upriv);
        if (err)
                goto err_neigh_cleanup;

        err = register_netdev(netdev);
        if (err) {
                pr_warn("Failed to register representor netdev for vport %d\n",
                        rep->vport);
                goto err_egdev_cleanup;
        }

        return 0;

err_egdev_cleanup:
        tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                     upriv);

err_neigh_cleanup:
        mlx5e_rep_neigh_cleanup(rpriv);

err_detach_netdev:
        mlx5e_detach_netdev(netdev_priv(netdev));

err_destroy_netdev:
        mlx5e_destroy_netdev(netdev_priv(netdev));
        kfree(rpriv);
        return err;
}

static void
mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep);
        struct net_device *netdev = rpriv->netdev;
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5e_rep_priv *uplink_rpriv;
        void *ppriv = priv->ppriv;
        struct mlx5e_priv *upriv;

        unregister_netdev(netdev);
        uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch,
                                                    REP_ETH);
        upriv = netdev_priv(uplink_rpriv->netdev);
        tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev,
                                     upriv);
        mlx5e_rep_neigh_cleanup(rpriv);
        mlx5e_detach_netdev(priv);
        mlx5e_destroy_netdev(priv);
        kfree(ppriv); /* mlx5e_rep_priv */
}

static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep)
{
        struct mlx5e_rep_priv *rpriv;

        rpriv = mlx5e_rep_to_rep_priv(rep);

        return rpriv->netdev;
}

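/* Vport 0 hosts the uplink (PF) representor; vports 1..total_vfs - 1
 * are registered as VF representors.
 */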
static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep_if rep_if = {};

                rep_if.load = mlx5e_vport_rep_load;
                rep_if.unload = mlx5e_vport_rep_unload;
                rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
                mlx5_eswitch_register_vport_rep(esw, vport, &rep_if, REP_ETH);
        }
}

static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;

        for (vport = 1; vport < total_vfs; vport++)
                mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH);
}

void mlx5e_register_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5_eswitch_rep_if rep_if;
        struct mlx5e_rep_priv *rpriv;

        rpriv = priv->ppriv;
        rpriv->netdev = priv->netdev;

        rep_if.load = mlx5e_nic_rep_load;
        rep_if.unload = mlx5e_nic_rep_unload;
        rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev;
        rep_if.priv = rpriv;
        INIT_LIST_HEAD(&rpriv->vport_sqs_list);
        mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport */

        mlx5e_rep_register_vf_vports(priv); /* VFs vports */
}

void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */
        mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF */
}

void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv;

        rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
        if (!rpriv)
                return NULL;

        rpriv->rep = &esw->offloads.vport_reps[0];
        return rpriv;
}