/*
 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
#include <linux/mlx5/fs.h>
#include "en.h"

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type);
static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai);

enum {
	MLX5E_FULLMATCH = 0,
	MLX5E_ALLMULTI  = 1,
	MLX5E_PROMISC   = 2,
};

enum {
	MLX5E_ACTION_NONE = 0,
	MLX5E_ACTION_ADD  = 1,
	MLX5E_ACTION_DEL  = 2,
};

struct mlx5e_l2_hash_node {
	struct hlist_node hlist;
	u8                 action;
	struct mlx5e_l2_rule ai;
};

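/* Hash unicast/multicast addresses by their last octet; the bucket
 * array (MLX5E_L2_ADDR_HASH_SIZE entries, one per possible last-octet
 * value) lives in struct mlx5e_l2_table.
 */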
static inline int mlx5e_hash_l2(u8 *addr)
{
	return addr[5];
}

static void mlx5e_add_l2_to_hash(struct hlist_head *hash, u8 *addr)
{
	struct mlx5e_l2_hash_node *hn;
	int ix = mlx5e_hash_l2(addr);
	int found = 0;

	hlist_for_each_entry(hn, &hash[ix], hlist)
		if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
			found = 1;
			break;
		}

	if (found) {
		hn->action = MLX5E_ACTION_NONE;
		return;
	}

	hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
	if (!hn)
		return;

	ether_addr_copy(hn->ai.addr, addr);
	hn->action = MLX5E_ACTION_ADD;

	hlist_add_head(&hn->hlist, &hash[ix]);
}

static void mlx5e_del_l2_from_hash(struct mlx5e_l2_hash_node *hn)
{
	hlist_del(&hn->hlist);
	kfree(hn);
}

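/* Push the set of active VLAN IDs into the NIC vport context, so the
 * device (and, e.g., the eswitch in SR-IOV setups) knows which VLANs
 * this vport is interested in. The list is truncated to the firmware
 * limit advertised in log_max_vlan_list.
 */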
static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
{
	struct net_device *ndev = priv->netdev;
	int max_list_size;
	int list_size;
	u16 *vlans;
	int vlan;
	int err;
	int i;

	list_size = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
		list_size++;

	max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);

	if (list_size > max_list_size) {
		netdev_warn(ndev,
			    "netdev vlans list size (%d) > (%d) max vport list size, some vlans will be dropped\n",
			    list_size, max_list_size);
		list_size = max_list_size;
	}

	vlans = kcalloc(list_size, sizeof(*vlans), GFP_KERNEL);
	if (!vlans)
		return -ENOMEM;

	i = 0;
	for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		if (i >= list_size)
			break;
		vlans[i++] = vlan;
	}

	err = mlx5_modify_nic_vport_vlans(priv->mdev, vlans, list_size);
	if (err)
		netdev_err(ndev, "Failed to modify vport vlans list err(%d)\n",
			   err);

	kfree(vlans);
	return err;
}

enum mlx5e_vlan_rule_type {
	MLX5E_VLAN_RULE_TYPE_UNTAGGED,
	MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
	MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
	MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};

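/* Add one steering rule to the VLAN flow table. All four rule types
 * forward matching packets to the L2 (MAC) table; they differ only in
 * what they match: no VLAN tag, any C-tag, any S-tag, or one specific
 * C-tag VID.
 */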
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
				 enum mlx5e_vlan_rule_type rule_type,
				 u16 vid, struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
		.encap_id = 0,
	};
	struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_handle **rule_p;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.l2.ft.t;

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		rule_p = &priv->fs.vlan.untagged_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		rule_p = &priv->fs.vlan.any_cvlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		rule_p = &priv->fs.vlan.any_svlan_rule;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.svlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
		break;
	default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
		rule_p = &priv->fs.vlan.active_vlans_rule[vid];
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.cvlan_tag);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.first_vid);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
			 vid);
		break;
	}

	*rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);

	if (IS_ERR(*rule_p)) {
		err = PTR_ERR(*rule_p);
		*rule_p = NULL;
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	return err;
}

static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
			       enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}

	if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
		mlx5e_vport_context_update_vlans(priv);

	err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);

	kvfree(spec);

	return err;
}

static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
				enum mlx5e_vlan_rule_type rule_type, u16 vid)
{
	switch (rule_type) {
	case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
		if (priv->fs.vlan.untagged_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.untagged_rule);
			priv->fs.vlan.untagged_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
		if (priv->fs.vlan.any_cvlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
			priv->fs.vlan.any_cvlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
		if (priv->fs.vlan.any_svlan_rule) {
			mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
			priv->fs.vlan.any_svlan_rule = NULL;
		}
		break;
	case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
		mlx5e_vport_context_update_vlans(priv);
		if (priv->fs.vlan.active_vlans_rule[vid]) {
			mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
			priv->fs.vlan.active_vlans_rule[vid] = NULL;
		}
		mlx5e_vport_context_update_vlans(priv);
		break;
	}
}

static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
	if (err)
		return err;

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}

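/* "VLAN filter disabled" means accept every VID: model it by installing
 * the any-C-tag/any-S-tag rules. While the netdev is promiscuous those
 * rules are already in place for the promisc path, so the transitions
 * below leave them alone.
 */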
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
	if (!priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = false;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_del_any_vid_rules(priv);
}

void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
{
	if (priv->fs.vlan.filter_disabled)
		return;

	priv->fs.vlan.filter_disabled = true;
	if (priv->netdev->flags & IFF_PROMISC)
		return;
	mlx5e_add_any_vid_rules(priv);
}

int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
			  u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	set_bit(vid, priv->fs.vlan.active_vlans);

	return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
}

int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
			   u16 vid)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	clear_bit(vid, priv->fs.vlan.active_vlans);

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);

	return 0;
}

static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_add_any_vid_rules(priv);
}

static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
{
	int i;

	mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);

	for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
		mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
	}

	if (priv->fs.vlan.filter_disabled &&
	    !(priv->netdev->flags & IFF_PROMISC))
		mlx5e_del_any_vid_rules(priv);
}

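/* Walk every node of an L2 address hash table; safe against removal of
 * the current node, so it can drive both add and delete actions.
 */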
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
	for (i = 0; i < MLX5E_L2_ADDR_HASH_SIZE; i++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)

static void mlx5e_execute_l2_action(struct mlx5e_priv *priv,
				    struct mlx5e_l2_hash_node *hn)
{
	switch (hn->action) {
	case MLX5E_ACTION_ADD:
		mlx5e_add_l2_flow_rule(priv, &hn->ai, MLX5E_FULLMATCH);
		hn->action = MLX5E_ACTION_NONE;
		break;

	case MLX5E_ACTION_DEL:
		mlx5e_del_l2_flow_rule(priv, &hn->ai);
		mlx5e_del_l2_from_hash(hn);
		break;
	}
}

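/* Snapshot the netdev UC/MC address lists into the driver hash tables
 * under the netdev address lock; rule installation happens later,
 * outside that (BH) lock.
 */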
static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct netdev_hw_addr *ha;

	netif_addr_lock_bh(netdev);

	mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc,
			     priv->netdev->dev_addr);

	netdev_for_each_uc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_uc, ha->addr);

	netdev_for_each_mc_addr(ha, netdev)
		mlx5e_add_l2_to_hash(priv->fs.l2.netdev_mc, ha->addr);

	netif_addr_unlock_bh(netdev);
}

static void mlx5e_fill_addr_array(struct mlx5e_priv *priv, int list_type,
				  u8 addr_array[][ETH_ALEN], int size)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct net_device *ndev = priv->netdev;
	struct mlx5e_l2_hash_node *hn;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int i = 0;
	int hi;

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;

	if (is_uc) /* Make sure our own address is pushed first */
		ether_addr_copy(addr_array[i++], ndev->dev_addr);
	else if (priv->fs.l2.broadcast_enabled)
		ether_addr_copy(addr_array[i++], ndev->broadcast);

	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi) {
		if (ether_addr_equal(ndev->dev_addr, hn->ai.addr))
			continue;
		if (i >= size)
			break;
		ether_addr_copy(addr_array[i++], hn->ai.addr);
	}
}

static void mlx5e_vport_context_update_addr_list(struct mlx5e_priv *priv,
						 int list_type)
{
	bool is_uc = (list_type == MLX5_NVPRT_LIST_TYPE_UC);
	struct mlx5e_l2_hash_node *hn;
	u8 (*addr_array)[ETH_ALEN] = NULL;
	struct hlist_head *addr_list;
	struct hlist_node *tmp;
	int max_size;
	int size;
	int err;
	int hi;

	size = is_uc ? 0 : (priv->fs.l2.broadcast_enabled ? 1 : 0);
	max_size = is_uc ?
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_uc_list) :
		1 << MLX5_CAP_GEN(priv->mdev, log_max_current_mc_list);

	addr_list = is_uc ? priv->fs.l2.netdev_uc : priv->fs.l2.netdev_mc;
	mlx5e_for_each_hash_node(hn, tmp, addr_list, hi)
		size++;

	if (size > max_size) {
		netdev_warn(priv->netdev,
			    "netdev %s list size (%d) > (%d) max vport list size, some addresses will be dropped\n",
			    is_uc ? "UC" : "MC", size, max_size);
		size = max_size;
	}

	if (size) {
		addr_array = kcalloc(size, ETH_ALEN, GFP_KERNEL);
		if (!addr_array) {
			err = -ENOMEM;
			goto out;
		}
		mlx5e_fill_addr_array(priv, list_type, addr_array, size);
	}

	err = mlx5_modify_nic_vport_mac_list(priv->mdev, list_type, addr_array, size);
out:
	if (err)
		netdev_err(priv->netdev,
			   "Failed to modify vport %s list err(%d)\n",
			   is_uc ? "UC" : "MC", err);
	kfree(addr_array);
}

static void mlx5e_vport_context_update(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *ea = &priv->fs.l2;

	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_UC);
	mlx5e_vport_context_update_addr_list(priv, MLX5_NVPRT_LIST_TYPE_MC);
	mlx5_modify_nic_vport_promisc(priv->mdev, 0,
				      ea->allmulti_enabled,
				      ea->promisc_enabled);
}

static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		mlx5e_execute_l2_action(priv, hn);

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		mlx5e_execute_l2_action(priv, hn);
}

static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_hash_node *hn;
	struct hlist_node *tmp;
	int i;

	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_uc, i)
		hn->action = MLX5E_ACTION_DEL;
	mlx5e_for_each_hash_node(hn, tmp, priv->fs.l2.netdev_mc, i)
		hn->action = MLX5E_ACTION_DEL;

	if (!test_bit(MLX5E_STATE_DESTROYING, &priv->state))
		mlx5e_sync_netdev_addr(priv);

	mlx5e_apply_netdev_addr(priv);
}

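/* Worker that reconciles the hardware steering state with the netdev rx
 * mode. Addresses use mark-and-sweep: everything is marked for delete,
 * whatever the netdev still carries is re-marked as keep/add, and the
 * remainder is swept away. Promisc/allmulti/broadcast rules are toggled
 * only on state transitions.
 */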
void mlx5e_set_rx_mode_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       set_rx_mode_work);

	struct mlx5e_l2_table *ea = &priv->fs.l2;
	struct net_device *ndev = priv->netdev;

	bool rx_mode_enable   = !test_bit(MLX5E_STATE_DESTROYING, &priv->state);
	bool promisc_enabled   = rx_mode_enable && (ndev->flags & IFF_PROMISC);
	bool allmulti_enabled  = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
	bool broadcast_enabled = rx_mode_enable;

	bool enable_promisc    = !ea->promisc_enabled   &&  promisc_enabled;
	bool disable_promisc   =  ea->promisc_enabled   && !promisc_enabled;
	bool enable_allmulti   = !ea->allmulti_enabled  &&  allmulti_enabled;
	bool disable_allmulti  =  ea->allmulti_enabled  && !allmulti_enabled;
	bool enable_broadcast  = !ea->broadcast_enabled &&  broadcast_enabled;
	bool disable_broadcast =  ea->broadcast_enabled && !broadcast_enabled;

	if (enable_promisc) {
		mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_add_any_vid_rules(priv);
	}
	if (enable_allmulti)
		mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
	if (enable_broadcast)
		mlx5e_add_l2_flow_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);

	mlx5e_handle_netdev_addr(priv);

	if (disable_broadcast)
		mlx5e_del_l2_flow_rule(priv, &ea->broadcast);
	if (disable_allmulti)
		mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
	if (disable_promisc) {
		if (!priv->fs.vlan.filter_disabled)
			mlx5e_del_any_vid_rules(priv);
		mlx5e_del_l2_flow_rule(priv, &ea->promisc);
	}

	ea->promisc_enabled   = promisc_enabled;
	ea->allmulti_enabled  = allmulti_enabled;
	ea->broadcast_enabled = broadcast_enabled;

	mlx5e_vport_context_update(priv);
}

static void mlx5e_destroy_groups(struct mlx5e_flow_table *ft)
{
	int i;

	for (i = ft->num_groups - 1; i >= 0; i--) {
		if (!IS_ERR_OR_NULL(ft->g[i]))
			mlx5_destroy_flow_group(ft->g[i]);
		ft->g[i] = NULL;
	}
	ft->num_groups = 0;
}

void mlx5e_init_l2_addr(struct mlx5e_priv *priv)
{
	ether_addr_copy(priv->fs.l2.broadcast.addr, priv->netdev->broadcast);
}

void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft)
{
	mlx5e_destroy_groups(ft);
	kfree(ft->g);
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;
}

static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc)
{
	int i;

	for (i = 0; i < MLX5E_NUM_TT; i++) {
		if (!IS_ERR_OR_NULL(ttc->rules[i])) {
			mlx5_del_flow_rules(ttc->rules[i]);
			ttc->rules[i] = NULL;
		}
	}
}

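/* Traffic type classification (TTC) dispatch table: the ethertype and
 * ip_protocol each TTC rule matches on, indexed by traffic type. A zero
 * etype or proto means "don't match on that field".
 */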
static struct {
	u16 etype;
	u8  proto;
} ttc_rules[MLX5E_NUM_TT] = {
	[MLX5E_TT_IPV4_TCP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_TCP },
	[MLX5E_TT_IPV6_TCP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_TCP },
	[MLX5E_TT_IPV4_UDP]       = { .etype = ETH_P_IP,   .proto = IPPROTO_UDP },
	[MLX5E_TT_IPV6_UDP]       = { .etype = ETH_P_IPV6, .proto = IPPROTO_UDP },
	[MLX5E_TT_IPV4_IPSEC_AH]  = { .etype = ETH_P_IP,   .proto = IPPROTO_AH },
	[MLX5E_TT_IPV6_IPSEC_AH]  = { .etype = ETH_P_IPV6, .proto = IPPROTO_AH },
	[MLX5E_TT_IPV4_IPSEC_ESP] = { .etype = ETH_P_IP,   .proto = IPPROTO_ESP },
	[MLX5E_TT_IPV6_IPSEC_ESP] = { .etype = ETH_P_IPV6, .proto = IPPROTO_ESP },
	[MLX5E_TT_IPV4]           = { .etype = ETH_P_IP,   .proto = 0 },
	[MLX5E_TT_IPV6]           = { .etype = ETH_P_IPV6, .proto = 0 },
	[MLX5E_TT_ANY]            = { .etype = 0,          .proto = 0 },
};

static struct mlx5_flow_handle *
mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
			struct mlx5_flow_table *ft,
			struct mlx5_flow_destination *dest,
			u16 etype,
			u8 proto)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
		.encap_id = 0,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	int err = 0;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return ERR_PTR(-ENOMEM);
	}

	if (proto) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto);
	}
	if (etype) {
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype);
	}

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		netdev_err(priv->netdev, "%s: add rule failed\n", __func__);
	}

	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
	struct mlx5_flow_destination dest;
	struct mlx5e_ttc_table *ttc;
	struct mlx5_flow_handle **rules;
	struct mlx5_flow_table *ft;
	int tt;
	int err;

	ttc = &priv->fs.ttc;
	ft = ttc->ft.t;
	rules = ttc->rules;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
		if (tt == MLX5E_TT_ANY)
			dest.tir_num = priv->direct_tir[0].tirn;
		else
			dest.tir_num = priv->indir_tir[tt].tirn;
		rules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest,
						    ttc_rules[tt].etype,
						    ttc_rules[tt].proto);
		if (IS_ERR(rules[tt]))
			goto del_rules;
	}

	return 0;

del_rules:
	err = PTR_ERR(rules[tt]);
	rules[tt] = NULL;
	mlx5e_cleanup_ttc_rules(ttc);
	return err;
}

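/* TTC table layout: group 1 holds the eight ethertype+ip_protocol rules
 * (TCP/UDP/AH/ESP for IPv4/IPv6), group 2 the two ethertype-only rules
 * (IPv4/IPv6 "any L4"), and group 3 the single catch-all rule.
 */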
#define MLX5E_TTC_NUM_GROUPS	3
#define MLX5E_TTC_GROUP1_SIZE	BIT(3)
#define MLX5E_TTC_GROUP2_SIZE	BIT(1)
#define MLX5E_TTC_GROUP3_SIZE	BIT(0)
#define MLX5E_TTC_TABLE_SIZE	(MLX5E_TTC_GROUP1_SIZE +\
				 MLX5E_TTC_GROUP2_SIZE +\
				 MLX5E_TTC_GROUP3_SIZE)

static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &ttc->ft;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	/* L4 Group */
	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* L3 Group */
	MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	/* Any Group */
	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_TTC_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err;
	ft->num_groups++;

	kvfree(in);
	return 0;

err:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	kvfree(in);

	return err;
}

static void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;

	mlx5e_cleanup_ttc_rules(ttc);
	mlx5e_destroy_flow_table(&ttc->ft);
}

static int mlx5e_create_ttc_table(struct mlx5e_priv *priv)
{
	struct mlx5e_ttc_table *ttc = &priv->fs.ttc;
	struct mlx5e_flow_table *ft = &ttc->ft;
	int err;

	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_TTC_TABLE_SIZE, MLX5E_TTC_FT_LEVEL, 0);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_ttc_table_groups(ttc);
	if (err)
		goto err;

	err = mlx5e_generate_ttc_table_rules(priv);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv,
				   struct mlx5e_l2_rule *ai)
{
	if (!IS_ERR_OR_NULL(ai->rule)) {
		mlx5_del_flow_rules(ai->rule);
		ai->rule = NULL;
	}
}

static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
				  struct mlx5e_l2_rule *ai, int type)
{
	struct mlx5_flow_act flow_act = {
		.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
		.flow_tag = MLX5_FS_DEFAULT_FLOW_TAG,
		.encap_id = 0,
	};
	struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
	struct mlx5_flow_destination dest;
	struct mlx5_flow_spec *spec;
	int err = 0;
	u8 *mc_dmac;
	u8 *mv_dmac;

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec) {
		netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
		return -ENOMEM;
	}

	mc_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			       outer_headers.dmac_47_16);
	mv_dmac = MLX5_ADDR_OF(fte_match_param, spec->match_value,
			       outer_headers.dmac_47_16);

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	dest.ft = priv->fs.ttc.ft.t;

	switch (type) {
	case MLX5E_FULLMATCH:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		eth_broadcast_addr(mc_dmac);
		ether_addr_copy(mv_dmac, ai->addr);
		break;

	case MLX5E_ALLMULTI:
		spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
		mc_dmac[0] = 0x01;
		mv_dmac[0] = 0x01;
		break;

	case MLX5E_PROMISC:
		break;
	}

	ai->rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(ai->rule)) {
		netdev_err(priv->netdev, "%s: add l2 rule(mac:%pM) failed\n",
			   __func__, mv_dmac);
		err = PTR_ERR(ai->rule);
		ai->rule = NULL;
	}

	kvfree(spec);

	return err;
}

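/* L2 table layout: group 1 is a single promiscuous catch-all entry,
 * group 2 holds the full DMAC matches (unicast and broadcast), and
 * group 3 is a single allmulti entry matching only the multicast bit
 * of the DMAC.
 */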
#define MLX5E_NUM_L2_GROUPS	3
#define MLX5E_L2_GROUP1_SIZE	BIT(0)
#define MLX5E_L2_GROUP2_SIZE	BIT(15)
#define MLX5E_L2_GROUP3_SIZE	BIT(0)
#define MLX5E_L2_TABLE_SIZE	(MLX5E_L2_GROUP1_SIZE +\
				 MLX5E_L2_GROUP2_SIZE +\
				 MLX5E_L2_GROUP3_SIZE)

static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int ix = 0;
	u8 *mc_dmac;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_NUM_L2_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;
	in = mlx5_vzalloc(inlen);
	if (!in) {
		kfree(ft->g);
		return -ENOMEM;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	mc_dmac = MLX5_ADDR_OF(fte_match_param, mc,
			       outer_headers.dmac_47_16);
	/* Flow Group for promiscuous */
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for full match */
	eth_broadcast_addr(mc_dmac);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	/* Flow Group for allmulti */
	eth_zero_addr(mc_dmac);
	mc_dmac[0] = 0x01;
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_L2_GROUP3_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);
	kvfree(in);

	return err;
}

static void mlx5e_destroy_l2_table(struct mlx5e_priv *priv)
{
	mlx5e_destroy_flow_table(&priv->fs.l2.ft);
}

static int mlx5e_create_l2_table(struct mlx5e_priv *priv)
{
	struct mlx5e_l2_table *l2_table = &priv->fs.l2;
	struct mlx5e_flow_table *ft = &l2_table->ft;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_L2_TABLE_SIZE, MLX5E_L2_FT_LEVEL, 0);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = mlx5e_create_l2_table_groups(l2_table);
	if (err)
		goto err_destroy_flow_table;

	return 0;

err_destroy_flow_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

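/* VLAN table layout: group 0 holds the per-VID C-tag matches (one entry
 * per possible VID), group 1 the untagged and any-C-tag rules, and
 * group 2 the any-S-tag rule.
 */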
#define MLX5E_NUM_VLAN_GROUPS	3
#define MLX5E_VLAN_GROUP0_SIZE	BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE	BIT(1)
#define MLX5E_VLAN_GROUP2_SIZE	BIT(0)
#define MLX5E_VLAN_TABLE_SIZE	(MLX5E_VLAN_GROUP0_SIZE +\
				 MLX5E_VLAN_GROUP1_SIZE +\
				 MLX5E_VLAN_GROUP2_SIZE)

static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
					    int inlen)
{
	int err;
	int ix = 0;
	u8 *mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP0_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_VLAN_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_destroy_groups;
	ft->num_groups++;

	return 0;

err_destroy_groups:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
	mlx5e_destroy_groups(ft);

	return err;
}

static int mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft)
{
	u32 *in;
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	int err;

	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	err = __mlx5e_create_vlan_table_groups(ft, in, inlen);

	kvfree(in);
	return err;
}

static int mlx5e_create_vlan_table(struct mlx5e_priv *priv)
{
	struct mlx5e_flow_table *ft = &priv->fs.vlan.ft;
	int err;

	ft->num_groups = 0;
	ft->t = mlx5_create_flow_table(priv->fs.ns, MLX5E_NIC_PRIO,
				       MLX5E_VLAN_TABLE_SIZE, MLX5E_VLAN_FT_LEVEL, 0);

	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}
	ft->g = kcalloc(MLX5E_NUM_VLAN_GROUPS, sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g) {
		err = -ENOMEM;
		goto err_destroy_vlan_table;
	}

	err = mlx5e_create_vlan_table_groups(ft);
	if (err)
		goto err_free_g;

	mlx5e_add_vlan_rules(priv);

	return 0;

err_free_g:
	kfree(ft->g);
err_destroy_vlan_table:
	mlx5_destroy_flow_table(ft->t);
	ft->t = NULL;

	return err;
}

static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv)
{
	mlx5e_del_vlan_rules(priv);
	mlx5e_destroy_flow_table(&priv->fs.vlan.ft);
}

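/* Build the receive steering pipeline: VLAN table -> L2 (DMAC) table ->
 * TTC table -> TIRs, with aRFS and the ethtool steering tables as
 * optional extras. Tables are created in reverse packet-traversal order
 * so that each table's forward destination already exists; teardown
 * (below) runs in the opposite order.
 */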
int mlx5e_create_flow_steering(struct mlx5e_priv *priv)
{
	int err;

	priv->fs.ns = mlx5_get_flow_namespace(priv->mdev,
					       MLX5_FLOW_NAMESPACE_KERNEL);

	if (!priv->fs.ns)
		return -EINVAL;

	err = mlx5e_arfs_create_tables(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n",
			   err);
		priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
	}

	err = mlx5e_create_ttc_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
			   err);
		goto err_destroy_arfs_tables;
	}

	err = mlx5e_create_l2_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create l2 table, err=%d\n",
			   err);
		goto err_destroy_ttc_table;
	}

	err = mlx5e_create_vlan_table(priv);
	if (err) {
		netdev_err(priv->netdev, "Failed to create vlan table, err=%d\n",
			   err);
		goto err_destroy_l2_table;
	}

	mlx5e_ethtool_init_steering(priv);

	return 0;

err_destroy_l2_table:
	mlx5e_destroy_l2_table(priv);
err_destroy_ttc_table:
	mlx5e_destroy_ttc_table(priv);
err_destroy_arfs_tables:
	mlx5e_arfs_destroy_tables(priv);

	return err;
}

void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv)
{
	mlx5e_destroy_vlan_table(priv);
	mlx5e_destroy_l2_table(priv);
	mlx5e_destroy_ttc_table(priv);
	mlx5e_arfs_destroy_tables(priv);
	mlx5e_ethtool_cleanup_steering(priv);
}