/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/hash.h>
#include <linux/mlx5/fs.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include "en.h"

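/* Each aRFS table hashes its rules into ARFS_HASH_SIZE (256) buckets,
 * keyed on the flow's L4 source and destination ports.
 */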
#define ARFS_HASH_SHIFT BITS_PER_BYTE
#define ARFS_HASH_SIZE BIT(BITS_PER_BYTE)

struct arfs_table {
	struct mlx5e_flow_table ft;
	struct mlx5_flow_handle	*default_rule;
	struct hlist_head	rules_hash[ARFS_HASH_SIZE];
};

enum arfs_type {
	ARFS_IPV4_TCP,
	ARFS_IPV6_TCP,
	ARFS_IPV4_UDP,
	ARFS_IPV6_UDP,
	ARFS_NUM_TYPES,
};

struct mlx5e_arfs_tables {
	struct arfs_table arfs_tables[ARFS_NUM_TYPES];
	/* Protect aRFS rules list */
	spinlock_t		arfs_lock;
	int			last_filter_id;
	struct workqueue_struct	*wq;
};

struct arfs_tuple {
	__be16 etype;
	u8     ip_proto;
	union {
		__be32 src_ipv4;
		struct in6_addr src_ipv6;
	};
	union {
		__be32 dst_ipv4;
		struct in6_addr dst_ipv6;
	};
	__be16 src_port;
	__be16 dst_port;
};

struct arfs_rule {
	struct mlx5e_priv	*priv;
	struct work_struct	arfs_work;
	struct mlx5_flow_handle	*rule;
	struct hlist_node	hlist;
	int			rxq;
	/* Flow ID passed to ndo_rx_flow_steer */
	int			flow_id;
	/* Filter ID returned by ndo_rx_flow_steer */
	int			filter_id;
	struct arfs_tuple	tuple;
};

#define mlx5e_for_each_arfs_rule(hn, tmp, arfs_tables, i, j) \
	for (i = 0; i < ARFS_NUM_TYPES; i++) \
		mlx5e_for_each_hash_arfs_rule(hn, tmp, arfs_tables[i].rules_hash, j)

#define mlx5e_for_each_hash_arfs_rule(hn, tmp, hash, j) \
	for (j = 0; j < ARFS_HASH_SIZE; j++) \
		hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist)

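/* Map an aRFS table type to the matching TTC traffic type. */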
static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type)
{
	switch (type) {
	case ARFS_IPV4_TCP:
		return MLX5_TT_IPV4_TCP;
	case ARFS_IPV4_UDP:
		return MLX5_TT_IPV4_UDP;
	case ARFS_IPV6_TCP:
		return MLX5_TT_IPV6_TCP;
	case ARFS_IPV6_UDP:
		return MLX5_TT_IPV6_UDP;
	default:
		return -EINVAL;
	}
}

static int arfs_disable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	int err, i;

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		/* Modify ttc rules destination back to their default */
		err = mlx5_ttc_fwd_default_dest(ttc, arfs_get_tt(i));
		if (err) {
			fs_err(fs,
			       "%s: modify ttc[%d] default destination failed, err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			return err;
		}
	}
	return 0;
}

static void arfs_del_rules(struct mlx5e_flow_steering *fs);

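/* Detach aRFS from the RX steering pipeline: flush all rules and point
 * the TTC rules back at their default destinations.
 */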
int mlx5e_arfs_disable(struct mlx5e_flow_steering *fs)
{
	/* Moving to switchdev mode, fs->arfs is freed by mlx5e_nic_profile
	 * cleanup_rx callback and it is not recreated when
	 * mlx5e_uplink_rep_profile is loaded as mlx5e_create_flow_steering()
	 * is not called by the uplink_rep profile init_rx callback. Thus, if
	 * ntuple is set, moving to switchdev flow will enter this function
	 * with fs->arfs nullified.
	 */
	if (!mlx5e_fs_get_arfs(fs))
		return 0;

	arfs_del_rules(fs);

	return arfs_disable(fs);
}

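/* Attach aRFS to the RX steering pipeline by re-pointing each TTC rule
 * at the corresponding aRFS flow table; on failure, roll back to the
 * default destinations via arfs_disable().
 */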
int mlx5e_arfs_enable(struct mlx5e_flow_steering *fs)
{
	struct mlx5_ttc_table *ttc = mlx5e_fs_get_ttc(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5_flow_destination dest = {};
	int err, i;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		dest.ft = arfs->arfs_tables[i].ft.t;
		/* Modify ttc rules destination to point at the aRFS FTs */
		err = mlx5_ttc_fwd_dest(ttc, arfs_get_tt(i), &dest);
		if (err) {
			fs_err(fs, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n",
			       __func__, arfs_get_tt(i), err);
			arfs_disable(fs);
			return err;
		}
	}
	return 0;
}

static void arfs_destroy_table(struct arfs_table *arfs_t)
{
	mlx5_del_flow_rules(arfs_t->default_rule);
	mlx5e_destroy_flow_table(&arfs_t->ft);
}

static void _mlx5e_cleanup_tables(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	int i;

	arfs_del_rules(fs);
	destroy_workqueue(arfs->wq);
	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		if (!IS_ERR_OR_NULL(arfs->arfs_tables[i].ft.t))
			arfs_destroy_table(&arfs->arfs_tables[i]);
	}
}

void mlx5e_arfs_destroy_tables(struct mlx5e_flow_steering *fs, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);

	if (!ntuple)
		return;

	_mlx5e_cleanup_tables(fs);
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
}

static int arfs_add_default_rule(struct mlx5e_flow_steering *fs,
				 struct mlx5e_rx_res *rx_res,
				 enum arfs_type type)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct arfs_table *arfs_t = &arfs->arfs_tables[type];
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	enum mlx5_traffic_types tt;
	int err = 0;

	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	tt = arfs_get_tt(type);
	if (tt == -EINVAL) {
		fs_err(fs, "%s: bad arfs_type: %d\n", __func__, type);
		return -EINVAL;
	}

	/* FIXME: Must use mlx5_ttc_get_default_dest(),
	 * but can't since TTC default is not setup yet !
	 */
	dest.tir_num = mlx5e_rx_res_get_tirn_rss(rx_res, tt);
	arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL,
						   &flow_act,
						   &dest, 1);
	if (IS_ERR(arfs_t->default_rule)) {
		err = PTR_ERR(arfs_t->default_rule);
		arfs_t->default_rule = NULL;
		fs_err(fs, "%s: add rule failed, arfs type=%d\n", __func__, type);
	}

	return err;
}

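/* Each aRFS table carries two flow groups: group 1 holds the per-flow
 * 5-tuple rules and group 2 holds the single catch-all default rule
 * (added with a NULL spec above) that forwards to the type's RSS TIR.
 */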
#define MLX5E_ARFS_NUM_GROUPS	2
#define MLX5E_ARFS_GROUP1_SIZE	(BIT(16) - 1)
#define MLX5E_ARFS_GROUP2_SIZE	BIT(0)
#define MLX5E_ARFS_TABLE_SIZE	(MLX5E_ARFS_GROUP1_SIZE +\
				 MLX5E_ARFS_GROUP2_SIZE)

static int arfs_create_groups(struct mlx5e_flow_table *ft,
			      enum arfs_type type)
{
	int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
	void *outer_headers_c;
	int ix = 0;
	u32 *in;
	int err;
	u8 *mc;

	ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
			sizeof(*ft->g), GFP_KERNEL);
	if (!ft->g)
		return -ENOMEM;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		goto err_free_g;
	}

	mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
	outer_headers_c = MLX5_ADDR_OF(fte_match_param, mc,
				       outer_headers);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ethertype);
	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV6_TCP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport);
		break;
	case ARFS_IPV4_UDP:
	case ARFS_IPV6_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_dport);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, udp_sport);
		break;
	default:
		err = -EINVAL;
		goto err_free_in;
	}

	switch (type) {
	case ARFS_IPV4_TCP:
	case ARFS_IPV4_UDP:
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c,
				 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
		break;
	case ARFS_IPV6_TCP:
	case ARFS_IPV6_UDP:
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		memset(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff, 16);
		break;
	default:
		err = -EINVAL;
		goto err_free_in;
	}

	MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP1_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_clean_group;
	ft->num_groups++;

	memset(in, 0, inlen);
	MLX5_SET_CFG(in, start_flow_index, ix);
	ix += MLX5E_ARFS_GROUP2_SIZE;
	MLX5_SET_CFG(in, end_flow_index, ix - 1);
	ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
	if (IS_ERR(ft->g[ft->num_groups]))
		goto err_clean_group;
	ft->num_groups++;

	kvfree(in);
	return 0;

err_clean_group:
	err = PTR_ERR(ft->g[ft->num_groups]);
	ft->g[ft->num_groups] = NULL;
err_free_in:
	kvfree(in);
err_free_g:
	kfree(ft->g);
	ft->g = NULL;
	return err;
}

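/* Create one aRFS flow table, its match groups and its default rule. */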
static int arfs_create_table(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res,
			     enum arfs_type type)
{
	struct mlx5_flow_namespace *ns = mlx5e_fs_get_ns(fs, false);
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct mlx5e_flow_table *ft = &arfs->arfs_tables[type].ft;
	struct mlx5_flow_table_attr ft_attr = {};
	int err;

	ft->num_groups = 0;

	ft_attr.max_fte = MLX5E_ARFS_TABLE_SIZE;
	ft_attr.level = MLX5E_ARFS_FT_LEVEL;
	ft_attr.prio = MLX5E_NIC_PRIO;

	ft->t = mlx5_create_flow_table(ns, &ft_attr);
	if (IS_ERR(ft->t)) {
		err = PTR_ERR(ft->t);
		ft->t = NULL;
		return err;
	}

	err = arfs_create_groups(ft, type);
	if (err)
		goto err;

	err = arfs_add_default_rule(fs, rx_res, type);
	if (err)
		goto err;

	return 0;
err:
	mlx5e_destroy_flow_table(ft);
	return err;
}

int mlx5e_arfs_create_tables(struct mlx5e_flow_steering *fs,
			     struct mlx5e_rx_res *rx_res, bool ntuple)
{
	struct mlx5e_arfs_tables *arfs;
	int err = -ENOMEM;
	int i;

	if (!ntuple)
		return 0;

	arfs = kvzalloc(sizeof(*arfs), GFP_KERNEL);
	if (!arfs)
		return -ENOMEM;

	spin_lock_init(&arfs->arfs_lock);
	arfs->wq = create_singlethread_workqueue("mlx5e_arfs");
	if (!arfs->wq)
		goto err;

	mlx5e_fs_set_arfs(fs, arfs);

	for (i = 0; i < ARFS_NUM_TYPES; i++) {
		err = arfs_create_table(fs, rx_res, i);
		if (err)
			goto err_des;
	}
	return 0;

err_des:
	_mlx5e_cleanup_tables(fs);
err:
	mlx5e_fs_set_arfs(fs, NULL);
	kvfree(arfs);
	return err;
}

#define MLX5E_ARFS_EXPIRY_QUOTA 60

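/* Expire at most MLX5E_ARFS_EXPIRY_QUOTA rules per scan so that the
 * time spent holding arfs_lock stays bounded. Candidates are unlinked
 * under the lock and their HW rules are deleted after it is dropped.
 */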
static void arfs_may_expire_flow(struct mlx5e_priv *priv)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *arfs_rule;
	struct hlist_node *htmp;
	HLIST_HEAD(del_list);
	int quota = 0;
	int i;
	int j;

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(arfs_rule, htmp, arfs->arfs_tables, i, j) {
		if (!work_pending(&arfs_rule->arfs_work) &&
		    rps_may_expire_flow(priv->netdev,
					arfs_rule->rxq, arfs_rule->flow_id,
					arfs_rule->filter_id)) {
			hlist_del_init(&arfs_rule->hlist);
			hlist_add_head(&arfs_rule->hlist, &del_list);
			if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
				break;
		}
	}
	spin_unlock_bh(&arfs->arfs_lock);
	hlist_for_each_entry_safe(arfs_rule, htmp, &del_list, hlist) {
		if (arfs_rule->rule) {
			mlx5_del_flow_rules(arfs_rule->rule);
			priv->channel_stats[arfs_rule->rxq]->rq.arfs_expired++;
		}
		hlist_del(&arfs_rule->hlist);
		kfree(arfs_rule);
	}
}

static void arfs_del_rules(struct mlx5e_flow_steering *fs)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(fs);
	struct hlist_node *htmp;
	struct arfs_rule *rule;
	HLIST_HEAD(del_list);
	int i;
	int j;

	spin_lock_bh(&arfs->arfs_lock);
	mlx5e_for_each_arfs_rule(rule, htmp, arfs->arfs_tables, i, j) {
		hlist_del_init(&rule->hlist);
		hlist_add_head(&rule->hlist, &del_list);
	}
	spin_unlock_bh(&arfs->arfs_lock);

	hlist_for_each_entry_safe(rule, htmp, &del_list, hlist) {
		cancel_work_sync(&rule->arfs_work);
		if (rule->rule)
			mlx5_del_flow_rules(rule->rule);
		hlist_del(&rule->hlist);
		kfree(rule);
	}
}

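/* Pick the hash bucket for a flow from its L4 source/destination ports. */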
static struct hlist_head *
arfs_hash_bucket(struct arfs_table *arfs_t, __be16 src_port,
		 __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);

	bucket_idx = hash_long(l, ARFS_HASH_SHIFT);

	return &arfs_t->rules_hash[bucket_idx];
}

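/* Select the aRFS table matching the flow's ethertype and IP protocol;
 * returns NULL for unsupported combinations.
 */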
static struct arfs_table *arfs_get_table(struct mlx5e_arfs_tables *arfs,
					 u8 ip_proto, __be16 etype)
{
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV4_TCP];
	if (etype == htons(ETH_P_IP) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV4_UDP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_TCP)
		return &arfs->arfs_tables[ARFS_IPV6_TCP];
	if (etype == htons(ETH_P_IPV6) && ip_proto == IPPROTO_UDP)
		return &arfs->arfs_tables[ARFS_IPV6_UDP];

	return NULL;
}

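/* Build a match spec for the rule's 5-tuple and install a steering
 * entry that forwards the flow to the direct TIR of its target RQ.
 */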
static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
					      struct arfs_rule *arfs_rule)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_tuple *tuple = &arfs_rule->tuple;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5_flow_destination dest = {};
	MLX5_DECLARE_FLOW_ACT(flow_act);
	struct arfs_table *arfs_table;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_table *ft;
	int err = 0;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec) {
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		err = -ENOMEM;
		goto out;
	}
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
			 outer_headers.ethertype);
	MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype,
		 ntohs(tuple->etype));
	arfs_table = arfs_get_table(arfs, tuple->ip_proto, tuple->etype);
	if (!arfs_table) {
		WARN_ONCE(1, "arfs table does not exist for etype %u and ip_proto %u\n",
			  tuple->etype, tuple->ip_proto);
		err = -EINVAL;
		goto out;
	}

	ft = arfs_table->ft.t;
	if (tuple->ip_proto == IPPROTO_TCP) {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.tcp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.tcp_sport,
			 ntohs(tuple->src_port));
	} else {
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_dport);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.udp_sport);
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_dport,
			 ntohs(tuple->dst_port));
		MLX5_SET(fte_match_param, spec->match_value, outer_headers.udp_sport,
			 ntohs(tuple->src_port));
	}
	if (tuple->etype == htons(ETH_P_IP)) {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &tuple->src_ipv4,
		       4);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &tuple->dst_ipv4,
		       4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.src_ipv4_src_ipv6.ipv4_layout.ipv4);
		MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
				 outer_headers.dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	} else {
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &tuple->src_ipv6,
		       16);
		memcpy(MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &tuple->dst_ipv6,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
		memset(MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    outer_headers.dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       0xff,
		       16);
	}
	dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq);
	rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_err++;
		netdev_dbg(priv->netdev,
			   "%s: add rule(filter id=%d, rq idx=%d, ip proto=0x%x) failed,err=%d\n",
			   __func__, arfs_rule->filter_id, arfs_rule->rxq,
			   tuple->ip_proto, err);
	}

out:
	kvfree(spec);
	return err ? ERR_PTR(err) : rule;
}

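/* Re-point an existing aRFS rule at the direct TIR of a new RQ. */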
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
				struct mlx5_flow_handle *rule, u16 rxq)
{
	struct mlx5_flow_destination dst = {};
	int err = 0;

	dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq);
	err = mlx5_modify_rule_destination(rule, &dst, NULL);
	if (err) {
		priv->channel_stats[rxq]->rq.arfs_err++;
		netdev_warn(priv->netdev,
			    "Failed to modify aRFS rule destination to rq=%d\n", rxq);
	}
}

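/* Deferred work for a single aRFS rule: drop it if the netdev is down,
 * otherwise install the HW rule (first run) or move it to its new RQ,
 * then opportunistically expire stale rules.
 */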
static void arfs_handle_work(struct work_struct *work)
{
	struct arfs_rule *arfs_rule = container_of(work,
						   struct arfs_rule,
						   arfs_work);
	struct mlx5e_priv *priv = arfs_rule->priv;
	struct mlx5e_arfs_tables *arfs;
	struct mlx5_flow_handle *rule;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		spin_lock_bh(&arfs->arfs_lock);
		hlist_del(&arfs_rule->hlist);
		spin_unlock_bh(&arfs->arfs_lock);

		mutex_unlock(&priv->state_lock);
		kfree(arfs_rule);
		goto out;
	}
	mutex_unlock(&priv->state_lock);

	if (!arfs_rule->rule) {
		rule = arfs_add_rule(priv, arfs_rule);
		if (IS_ERR(rule))
			goto out;
		arfs_rule->rule = rule;
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_add++;
	} else {
		arfs_modify_rule_rq(priv, arfs_rule->rule,
				    arfs_rule->rxq);
	}
out:
	arfs_may_expire_flow(priv);
}

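/* Allocate and hash a new aRFS rule; GFP_ATOMIC because the caller
 * holds arfs_lock. The filter id handed back to RPS wraps at
 * RPS_NO_FILTER.
 */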
static struct arfs_rule *arfs_alloc_rule(struct mlx5e_priv *priv,
					 struct arfs_table *arfs_t,
					 const struct flow_keys *fk,
					 u16 rxq, u32 flow_id)
{
	struct mlx5e_arfs_tables *arfs = mlx5e_fs_get_arfs(priv->fs);
	struct arfs_rule *rule;
	struct arfs_tuple *tuple;

	rule = kzalloc(sizeof(*rule), GFP_ATOMIC);
	if (!rule) {
		priv->channel_stats[rxq]->rq.arfs_err++;
		return NULL;
	}

	rule->priv = priv;
	rule->rxq = rxq;
	INIT_WORK(&rule->arfs_work, arfs_handle_work);

	tuple = &rule->tuple;
	tuple->etype = fk->basic.n_proto;
	tuple->ip_proto = fk->basic.ip_proto;
	if (tuple->etype == htons(ETH_P_IP)) {
		tuple->src_ipv4 = fk->addrs.v4addrs.src;
		tuple->dst_ipv4 = fk->addrs.v4addrs.dst;
	} else if (tuple->etype == htons(ETH_P_IPV6)) {
		memcpy(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
		       sizeof(struct in6_addr));
		memcpy(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
		       sizeof(struct in6_addr));
	}
	tuple->src_port = fk->ports.src;
	tuple->dst_port = fk->ports.dst;

	rule->flow_id = flow_id;
	rule->filter_id = arfs->last_filter_id++ % RPS_NO_FILTER;

	hlist_add_head(&rule->hlist,
		       arfs_hash_bucket(arfs_t, tuple->src_port,
					tuple->dst_port));
	return rule;
}

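/* Compare a stored tuple against freshly dissected flow keys. */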
static bool arfs_cmp(const struct arfs_tuple *tuple, const struct flow_keys *fk)
{
	if (tuple->src_port != fk->ports.src || tuple->dst_port != fk->ports.dst)
		return false;
	if (tuple->etype != fk->basic.n_proto)
		return false;
	if (tuple->etype == htons(ETH_P_IP))
		return tuple->src_ipv4 == fk->addrs.v4addrs.src &&
		       tuple->dst_ipv4 == fk->addrs.v4addrs.dst;
	if (tuple->etype == htons(ETH_P_IPV6))
		return !memcmp(&tuple->src_ipv6, &fk->addrs.v6addrs.src,
			       sizeof(struct in6_addr)) &&
		       !memcmp(&tuple->dst_ipv6, &fk->addrs.v6addrs.dst,
			       sizeof(struct in6_addr));
	return false;
}

static struct arfs_rule *arfs_find_rule(struct arfs_table *arfs_t,
					const struct flow_keys *fk)
{
	struct arfs_rule *arfs_rule;
	struct hlist_head *head;

	head = arfs_hash_bucket(arfs_t, fk->ports.src, fk->ports.dst);
	hlist_for_each_entry(arfs_rule, head, hlist) {
		if (arfs_cmp(&arfs_rule->tuple, fk))
			return arfs_rule;
	}

	return NULL;
}

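/* .ndo_rx_flow_steer callback: called by RFS when a flow should be
 * steered to RX queue rxq_index. Returns the filter id that RPS later
 * passes to rps_may_expire_flow(), or -EPROTONOSUPPORT for flows aRFS
 * cannot handle (non-IP, encapsulated, or unsupported L4 protocol).
 */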
int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
			u16 rxq_index, u32 flow_id)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_arfs_tables *arfs;
	struct arfs_rule *arfs_rule;
	struct arfs_table *arfs_t;
	struct flow_keys fk;

	arfs = mlx5e_fs_get_arfs(priv->fs);
	if (!skb_flow_dissect_flow_keys(skb, &fk, 0))
		return -EPROTONOSUPPORT;

	if (fk.basic.n_proto != htons(ETH_P_IP) &&
	    fk.basic.n_proto != htons(ETH_P_IPV6))
		return -EPROTONOSUPPORT;

	if (skb->encapsulation)
		return -EPROTONOSUPPORT;

	arfs_t = arfs_get_table(arfs, fk.basic.ip_proto, fk.basic.n_proto);
	if (!arfs_t)
		return -EPROTONOSUPPORT;

	spin_lock_bh(&arfs->arfs_lock);
	arfs_rule = arfs_find_rule(arfs_t, &fk);
	if (arfs_rule) {
		if (arfs_rule->rxq == rxq_index || work_busy(&arfs_rule->arfs_work)) {
			spin_unlock_bh(&arfs->arfs_lock);
			return arfs_rule->filter_id;
		}

		priv->channel_stats[rxq_index]->rq.arfs_request_in++;
		priv->channel_stats[arfs_rule->rxq]->rq.arfs_request_out++;
		arfs_rule->rxq = rxq_index;
	} else {
		arfs_rule = arfs_alloc_rule(priv, arfs_t, &fk, rxq_index, flow_id);
		if (!arfs_rule) {
			spin_unlock_bh(&arfs->arfs_lock);
			return -ENOMEM;
		}
	}
	queue_work(arfs->wq, &arfs_rule->arfs_work);
	spin_unlock_bh(&arfs->arfs_lock);
	return arfs_rule->filter_id;
}