/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"

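/* One offloaded TC flower rule.  Flows live in priv->fs.tc.ht, hashed
 * by the TC cookie; @attr is only allocated and used for e-switch
 * (FDB) flows.
 */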
struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_rule   *rule;
        struct mlx5_esw_flow_attr *attr;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

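/* Add a flow to the NIC receive flow table, creating the table on
 * first use.  A FWD_DEST action continues to the vlan flow table (the
 * normal RX pipeline); otherwise a COUNT action attaches a flow
 * counter as the destination.
 */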
static struct mlx5_flow_rule *mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_rule *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rule(priv->fs.tc.t, spec,
                                  action, flow_tag,
                                  &dest);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

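/* Add a flow to the e-switch FDB in SRIOV offloads mode.  Any vlan
 * push/pop action is set up on the e-switch first, then the offloaded
 * rule itself is installed.
 */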
static struct mlx5_flow_rule *mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                                                    struct mlx5_flow_spec *spec,
                                                    struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int err;

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                return ERR_PTR(err);

        return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

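/* Delete an offloaded rule and its counter, undo any e-switch vlan
 * action, and destroy the TC flow table once the last filter is gone.
 */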
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5_flow_rule *rule,
                              struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(rule);

        if (esw && esw->mode == SRIOV_OFFLOADS)
                mlx5_eswitch_del_vlan_action(esw, attr);

        mlx5_del_flow_rule(rule);

        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

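/* Translate a flower match into an mlx5 flow spec over the outer
 * headers: ethertype/IP protocol, MAC addresses, vlan id, IPv4/IPv6
 * addresses and TCP/UDP ports.  Any other dissector key is rejected
 * with -EOPNOTSUPP.
 */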
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);
                addr_type = key->addr_type;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }
        }

        return 0;
}

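/* Parse TC actions for a NIC flow.  Exactly one action is supported
 * per rule: gact drop maps to DROP (plus COUNT when the device has
 * flow counters), and skbedit mark maps to a 16-bit flow tag reported
 * with received packets.
 */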
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

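/* Parse TC actions for an e-switch (FDB) flow: gact drop, mirred
 * redirect to another port on the same switch HW, and 802.1Q vlan
 * push/pop.
 */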
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5_esw_flow_attr *attr)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        attr->in_rep = priv->ppriv;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

                        if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }

                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        out_priv = netdev_priv(out_dev);
                        attr->out_rep = out_priv->ppriv;
                        continue;
                }

                if (is_tcf_vlan(a)) {
                        if (tcf_vlan_action(a) == VLAN_F_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                        } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;

                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
                        }
                        continue;
                }

                return -EINVAL;
        }
        return 0;
}

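/* Add or replace a flower classifier.  In SRIOV offloads mode the flow
 * is programmed into the e-switch FDB, otherwise into the NIC flow
 * table.  On replace, the old rule is deleted only after the new one
 * has been installed.
 */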
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err = 0;
        bool fdb_flow = false;
        u32 flow_tag, action;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_flow_rule *old = NULL;
        struct mlx5_esw_flow_attr *old_attr = NULL;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw && esw->mode == SRIOV_OFFLOADS)
                fdb_flow = true;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (flow) {
                old = flow->rule;
                old_attr = flow->attr;
        } else {
                if (fdb_flow)
                        flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
                                       GFP_KERNEL);
                else
                        flow = kzalloc(sizeof(*flow), GFP_KERNEL);
        }

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, spec, f);
        if (err < 0)
                goto err_free;

        if (fdb_flow) {
                flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
                err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_free;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        if (old)
                mlx5e_tc_del_flow(priv, old, old_attr);

        goto out;

err_del_rule:
        mlx5_del_flow_rule(flow->rule);

err_free:
        if (!old)
                kfree(flow);
out:
        kvfree(spec);
        return err;
}

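/* Remove the flower classifier matching the given TC cookie. */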
int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);

        kfree(flow);

        return 0;
}

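/* Feed the cached hardware counters (bytes, packets, last use) of a
 * flower classifier back into its TC action stats.
 */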
int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        return 0;
}

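/* Flows are hashed by their u64 TC cookie. */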
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

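/* Initialize the TC flow hashtable. */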
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

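/* rhashtable free callback: release a flow that is still offloaded at
 * cleanup time.
 */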
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
        kfree(flow);
}

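/* Tear down all offloaded flows and, if it still exists, the TC flow
 * table.
 */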
void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}
608 }