net/mlx5: Support encap id when setting new steering entry
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_tc.c
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/flow_dissector.h>
34 #include <net/pkt_cls.h>
35 #include <net/tc_act/tc_gact.h>
36 #include <net/tc_act/tc_skbedit.h>
37 #include <linux/mlx5/fs.h>
38 #include <linux/mlx5/device.h>
39 #include <linux/rhashtable.h>
40 #include <net/switchdev.h>
41 #include <net/tc_act/tc_mirred.h>
42 #include <net/tc_act/tc_vlan.h>
43 #include "en.h"
44 #include "en_tc.h"
45 #include "eswitch.h"
46
/* One offloaded TC flower filter, keyed by the TC filter cookie.
 * For eswitch (FDB) flows, @attr points into extra storage allocated
 * right after this struct; for NIC flows it is left NULL.
 */
struct mlx5e_tc_flow {
	struct rhash_head	node;	/* entry in priv->fs.tc.ht */
	u64			cookie;	/* TC filter cookie (hash key) */
	struct mlx5_flow_handle *rule;	/* installed steering rule */
	struct mlx5_esw_flow_attr *attr; /* eswitch attrs; NULL for NIC flows */
};
53
/* Sizing of the lazily created NIC TC offload flow table */
#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
56
/* Install one TC flower rule into the NIC receive steering pipeline.
 *
 * Depending on @action the rule either forwards matching packets to the
 * vlan flow table (FWD_DEST) or is bound to a freshly created flow
 * counter (COUNT — the drop path).  The TC flow table itself is created
 * lazily here on first use, and destroyed again if this very first rule
 * fails to install.
 *
 * Returns the rule handle, or an ERR_PTR on failure (counter and any
 * table created here are cleaned up on the error paths).
 */
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,	/* no encapsulation on the NIC rx path */
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	/* Create the TC table on first use; torn down below on failure so
	 * we don't keep an empty table around.
	 */
	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	/* TC flower only matches on outer headers here */
	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	/* mlx5_fc_destroy() tolerates a NULL counter */
	mlx5_fc_destroy(dev, counter);

	return rule;
}
120
121 static struct mlx5_flow_handle *
122 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
123                       struct mlx5_flow_spec *spec,
124                       struct mlx5_esw_flow_attr *attr)
125 {
126         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
127         int err;
128
129         err = mlx5_eswitch_add_vlan_action(esw, attr);
130         if (err)
131                 return ERR_PTR(err);
132
133         return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
134 }
135
/* Tear down one offloaded flow (NIC or FDB).
 *
 * The flow counter is looked up from the rule *before* the rule is
 * deleted, since the handle is freed by mlx5_del_flow_rules().  In
 * SRIOV offloads mode the eswitch vlan push/pop state added at flow
 * creation is released as well.  When the last NIC rule goes away the
 * lazily created TC flow table is destroyed.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_handle *rule,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	/* must be fetched before the rule (and its counter binding) is freed */
	counter = mlx5_flow_rule_counter(rule);

	if (esw && esw->mode == SRIOV_OFFLOADS)
		mlx5_eswitch_del_vlan_action(esw, attr);

	mlx5_del_flow_rules(rule);

	/* mlx5_fc_destroy() tolerates a NULL counter */
	mlx5_fc_destroy(priv->mdev, counter);

	/* drop the TC table once no offloaded filters remain */
	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}
157
/* Translate a TC flower match (@f) into an mlx5 flow spec.
 *
 * Each flow-dissector key the filter uses is copied into the outer
 * headers portion of @spec: the dissector mask goes into match_criteria
 * and the dissector key into match_value.  Returns 0 on success,
 * -EOPNOTSUPP for dissector keys the hardware path does not handle,
 * or -EINVAL for unsupported L4 protocols with a ports match.
 */
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	/* Refuse filters that match on anything beyond the keys below */
	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	/* CONTROL only tells us whether the address keys are v4 or v6 */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	/* Ethertype and IP protocol */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		/* remembered for the L4 ports switch below */
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	/* Source/destination MAC addresses */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	/* Outer (first) vlan id — only matched when the filter masks it */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
		}
	}

	/* IPv4 source/destination addresses */
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	/* IPv6 source/destination addresses */
	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	/* L4 ports — only meaningful for TCP and UDP */
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}
345
346 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
347                                 u32 *action, u32 *flow_tag)
348 {
349         const struct tc_action *a;
350         LIST_HEAD(actions);
351
352         if (tc_no_actions(exts))
353                 return -EINVAL;
354
355         *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
356         *action = 0;
357
358         tcf_exts_to_list(exts, &actions);
359         list_for_each_entry(a, &actions, list) {
360                 /* Only support a single action per rule */
361                 if (*action)
362                         return -EINVAL;
363
364                 if (is_tcf_gact_shot(a)) {
365                         *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
366                         if (MLX5_CAP_FLOWTABLE(priv->mdev,
367                                                flow_table_properties_nic_receive.flow_counter))
368                                 *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
369                         continue;
370                 }
371
372                 if (is_tcf_skbedit_mark(a)) {
373                         u32 mark = tcf_skbedit_mark(a);
374
375                         if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
376                                 netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
377                                             mark);
378                                 return -EINVAL;
379                         }
380
381                         *flow_tag = mark;
382                         *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
383                         continue;
384                 }
385
386                 return -EINVAL;
387         }
388
389         return 0;
390 }
391
392 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
393                                 struct mlx5_esw_flow_attr *attr)
394 {
395         const struct tc_action *a;
396         LIST_HEAD(actions);
397
398         if (tc_no_actions(exts))
399                 return -EINVAL;
400
401         memset(attr, 0, sizeof(*attr));
402         attr->in_rep = priv->ppriv;
403
404         tcf_exts_to_list(exts, &actions);
405         list_for_each_entry(a, &actions, list) {
406                 if (is_tcf_gact_shot(a)) {
407                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
408                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
409                         continue;
410                 }
411
412                 if (is_tcf_mirred_egress_redirect(a)) {
413                         int ifindex = tcf_mirred_ifindex(a);
414                         struct net_device *out_dev;
415                         struct mlx5e_priv *out_priv;
416
417                         out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
418
419                         if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
420                                 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
421                                        priv->netdev->name, out_dev->name);
422                                 return -EINVAL;
423                         }
424
425                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
426                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
427                         out_priv = netdev_priv(out_dev);
428                         attr->out_rep = out_priv->ppriv;
429                         continue;
430                 }
431
432                 if (is_tcf_vlan(a)) {
433                         if (tcf_vlan_action(a) == VLAN_F_POP) {
434                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
435                         } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
436                                 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
437                                         return -EOPNOTSUPP;
438
439                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
440                                 attr->vlan = tcf_vlan_push_vid(a);
441                         }
442                         continue;
443                 }
444
445                 return -EINVAL;
446         }
447         return 0;
448 }
449
/* Add or replace an offloaded TC flower filter.
 *
 * Looks up an existing flow by the filter cookie; if found, a new rule
 * is installed first and only then is the old one torn down (replace
 * semantics).  In SRIOV offloads mode the flow goes to the eswitch FDB
 * (with mlx5_esw_flow_attr storage tacked onto the flow allocation),
 * otherwise to the NIC TC table.  Returns 0 on success or a negative
 * errno.
 *
 * NOTE(review): on a failed replace (err_free with @old set) the
 * pre-existing flow stays in the hashtable but its rule/attr fields
 * have already been overwritten above — confirm this is intended.
 */
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_handle *old = NULL;
	struct mlx5_esw_flow_attr *old_attr = NULL;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow) {
		/* replacing: keep the old rule alive until the new one is in */
		old = flow->rule;
		old_attr = flow->attr;
	} else {
		/* FDB flows carry their eswitch attrs right after the struct */
		if (fdb_flow)
			flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
				       GFP_KERNEL);
		else
			flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	}

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		/* attrs live in the extra storage allocated after the flow */
		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	/* new rule installed and indexed — now retire the old one */
	if (old)
		mlx5e_tc_del_flow(priv, old, old_attr);

	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);

err_free:
	/* an existing (replaced) flow is still in the hashtable — keep it */
	if (!old)
		kfree(flow);
out:
	kvfree(spec);
	return err;
}
529
530 int mlx5e_delete_flower(struct mlx5e_priv *priv,
531                         struct tc_cls_flower_offload *f)
532 {
533         struct mlx5e_tc_flow *flow;
534         struct mlx5e_tc_table *tc = &priv->fs.tc;
535
536         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
537                                       tc->ht_params);
538         if (!flow)
539                 return -EINVAL;
540
541         rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
542
543         mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
544
545         kfree(flow);
546
547         return 0;
548 }
549
550 int mlx5e_stats_flower(struct mlx5e_priv *priv,
551                        struct tc_cls_flower_offload *f)
552 {
553         struct mlx5e_tc_table *tc = &priv->fs.tc;
554         struct mlx5e_tc_flow *flow;
555         struct tc_action *a;
556         struct mlx5_fc *counter;
557         LIST_HEAD(actions);
558         u64 bytes;
559         u64 packets;
560         u64 lastuse;
561
562         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
563                                       tc->ht_params);
564         if (!flow)
565                 return -EINVAL;
566
567         counter = mlx5_flow_rule_counter(flow->rule);
568         if (!counter)
569                 return 0;
570
571         mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
572
573         tcf_exts_to_list(f->exts, &actions);
574         list_for_each_entry(a, &actions, list)
575                 tcf_action_stats_update(a, bytes, packets, lastuse);
576
577         return 0;
578 }
579
/* Hashtable layout for mlx5e_tc_flow entries, keyed by the TC cookie */
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
586
587 int mlx5e_tc_init(struct mlx5e_priv *priv)
588 {
589         struct mlx5e_tc_table *tc = &priv->fs.tc;
590
591         tc->ht_params = mlx5e_tc_flow_ht_params;
592         return rhashtable_init(&tc->ht, &tc->ht_params);
593 }
594
595 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
596 {
597         struct mlx5e_tc_flow *flow = ptr;
598         struct mlx5e_priv *priv = arg;
599
600         mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
601         kfree(flow);
602 }
603
604 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
605 {
606         struct mlx5e_tc_table *tc = &priv->fs.tc;
607
608         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
609
610         if (!IS_ERR_OR_NULL(tc->t)) {
611                 mlx5_destroy_flow_table(tc->t);
612                 tc->t = NULL;
613         }
614 }