/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct mlx5_esw_flow_attr *attr;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
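
/* Add an offloaded flow to the NIC RX TC flow table, creating the
 * auto-grouped table on first use. The caller passes the already parsed
 * match spec, the action flags and the flow tag.
 */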
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}
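
/* Add an offloaded flow via the eswitch FDB (switchdev mode). The vlan
 * push/pop setup is applied before the offloaded rule is installed.
 */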
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}
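
/* Tear down an offloaded flow: release the vlan action (FDB mode), delete
 * the rule and its counter, and destroy the NIC TC table once it is empty.
 */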
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_handle *rule,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	if (esw && esw->mode == SRIOV_OFFLOADS)
		mlx5_eswitch_del_vlan_action(esw, attr);

	mlx5_del_flow_rules(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}
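
/* Fill the match spec for a VXLAN tunnel: match on UDP and, when the filter
 * carries a tunnel key id, on the VXLAN VNI in the misc parameters.
 */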
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
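
/* Parse the tunnel (encap/decap) part of a flower filter into the outer
 * headers of the match spec: UDP destination port, tunnel IPv4 addresses,
 * and the VXLAN VNI via parse_vxlan_attr().
 */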
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			return -EOPNOTSUPP;

		/* udp src port isn't supported */
		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
			return -EOPNOTSUPP;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

	} else { /* udp dst port must be given */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));
	}

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
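
/* Translate a flower filter into an mlx5 match spec. Tunnel keys are parsed
 * first into the outer headers; the remaining L2-L4 keys then go into the
 * inner headers for decap flows or the outer headers otherwise.
 */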
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;

		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}
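
/* Parse tc actions for the eswitch (FDB) offload path: drop, mirred redirect
 * to another representor on the same eswitch, vlan push/pop and tunnel decap.
 */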
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5_esw_flow_attr *attr)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			out_priv = netdev_priv(out_dev);
			attr->out_rep = out_priv->ppriv;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}
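
/* cls_flower add/replace entry point from the driver's tc setup path: parse
 * the match and actions, install the rule through the FDB or NIC path, and
 * track the flow in the hash table, replacing any flow with the same cookie.
 */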
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_handle *old = NULL;
	struct mlx5_esw_flow_attr *old_attr = NULL;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow) {
		old = flow->rule;
		old_attr = flow->attr;
	} else {
		if (fdb_flow)
			flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
				       GFP_KERNEL);
		else
			flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	}

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (old)
		mlx5e_tc_del_flow(priv, old, old_attr);

	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);

err_free:
	if (!old)
		kfree(flow);
out:
	kvfree(spec);
	return err;
}
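
/* cls_flower destroy entry point: look the flow up by cookie, unlink it from
 * the hash table and release the hardware rule.
 */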
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
	kfree(flow);
	return 0;
}
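
/* cls_flower stats entry point: read the cached counter of the offloaded rule
 * and propagate bytes/packets/lastuse to the tc actions.
 */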
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes, packets, lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
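
/* Initialize the rhashtable that tracks offloaded flows, keyed by the tc
 * filter cookie.
 */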
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
	kfree(flow);
}
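
/* Release all offloaded flows and the NIC TC flow table on teardown. */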
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}