/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
        u32 action;
        u32 flow_tag;
        u32 mod_hdr_id;
};

enum {
        MLX5E_TC_FLOW_ESWITCH   = BIT(0),
        MLX5E_TC_FLOW_NIC       = BIT(1),
        MLX5E_TC_FLOW_OFFLOADED = BIT(2),
};

struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        u8                      flags;
        struct mlx5_flow_handle *rule;
        struct list_head        encap;   /* flows sharing the same encap ID */
        struct list_head        mod_hdr; /* flows sharing the same mod hdr ID */
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
                struct mlx5_nic_flow_attr nic_attr[0];
        };
};

struct mlx5e_tc_flow_parse_attr {
        struct ip_tunnel_info tun_info;
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
        void *mod_hdr_actions;
        int mirred_ifindex;
};

enum {
        MLX5_HEADER_TYPE_VXLAN = 0x0,
        MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)

struct mod_hdr_key {
        int num_actions;
        void *actions;
};

struct mlx5e_mod_hdr_entry {
        /* a node of a hash table which keeps all the mod_hdr entries */
        struct hlist_node mod_hdr_hlist;

        /* flows sharing the same mod_hdr entry */
        struct list_head flows;

        struct mod_hdr_key key;

        u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
        return jhash(key->actions,
                     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
                                   struct mod_hdr_key *b)
{
        if (a->num_actions != b->num_actions)
                return 1;

        return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

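/* Illustrative note: mod_hdr entries are deduplicated by content.  Two tc
 * flows whose pedit actions compile to byte-identical action arrays hash
 * to the same bucket via hash_mod_hdr_info() and compare equal in
 * cmp_mod_hdr_info(), so mlx5e_attach_mod_hdr() below reuses a single
 * firmware modify-header context (one mod_hdr_id) for both flows rather
 * than allocating a second one.
 */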
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
                                struct mlx5e_tc_flow *flow,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int num_actions, actions_size, namespace, err;
        struct mlx5e_mod_hdr_entry *mh;
        struct mod_hdr_key key;
        bool found = false;
        u32 hash_key;

        num_actions  = parse_attr->num_mod_hdr_actions;
        actions_size = MLX5_MH_ACT_SZ * num_actions;

        key.actions = parse_attr->mod_hdr_actions;
        key.num_actions = num_actions;

        hash_key = hash_mod_hdr_info(&key);

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                namespace = MLX5_FLOW_NAMESPACE_FDB;
                hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
                                       mod_hdr_hlist, hash_key) {
                        if (!cmp_mod_hdr_info(&mh->key, &key)) {
                                found = true;
                                break;
                        }
                }
        } else {
                namespace = MLX5_FLOW_NAMESPACE_KERNEL;
                hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
                                       mod_hdr_hlist, hash_key) {
                        if (!cmp_mod_hdr_info(&mh->key, &key)) {
                                found = true;
                                break;
                        }
                }
        }

        if (found)
                goto attach_flow;

        mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
        if (!mh)
                return -ENOMEM;

        mh->key.actions = (void *)mh + sizeof(*mh);
        memcpy(mh->key.actions, key.actions, actions_size);
        mh->key.num_actions = num_actions;
        INIT_LIST_HEAD(&mh->flows);

        err = mlx5_modify_header_alloc(priv->mdev, namespace,
                                       mh->key.num_actions,
                                       mh->key.actions,
                                       &mh->mod_hdr_id);
        if (err)
                goto out_err;

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
        else
                hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
        list_add(&flow->mod_hdr, &mh->flows);
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
        else
                flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

        return 0;

out_err:
        kfree(mh);
        return err;
}

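/* Illustrative note: flow->mod_hdr links the flow into mh->flows.  In
 * mlx5e_detach_mod_hdr() below, after list_del(), if the saved 'next'
 * pointer is an empty list head it must be &mh->flows itself (the flow
 * was the entry's last user), so the entry and its firmware
 * modify-header context can be released.
 */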
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
                                 struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->mod_hdr.next;

        list_del(&flow->mod_hdr);

        if (list_empty(next)) {
                struct mlx5e_mod_hdr_entry *mh;

                mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

                mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
                hash_del(&mh->mod_hdr_hlist);
                kfree(mh);
        }
}

static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {
                .action = attr->action,
                .flow_tag = attr->flow_tag,
                .encap_id = 0,
        };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        bool table_created = false;
        int err;

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                flow_act.modify_id = attr->mod_hdr_id;
                kfree(parse_attr->mod_hdr_actions);
                if (err) {
                        rule = ERR_PTR(err);
                        goto err_create_mod_hdr_id;
                }
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                int tc_grp_size, tc_tbl_size;
                u32 max_flow_counter;

                max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
                                    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

                tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

                tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
                                    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            tc_tbl_size,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
                                   &flow_act, &dest, 1);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
        mlx5_fc_destroy(dev, counter);

        return rule;
}
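
/* Sizing sketch for mlx5e_tc_add_nic_flow() above (illustrative numbers):
 * with MLX5E_TC_TABLE_MAX_GROUP_SIZE = 1 << 16 and
 * MLX5E_TC_TABLE_NUM_GROUPS = 4, a device reporting max_flow_counter >=
 * 64K and log_max_ft_size = 17 gets tc_grp_size = 64K and
 * tc_tbl_size = min(4 * 64K, 1 << 17) = 128K entries.
 */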

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(flow->rule);
        mlx5_del_flow_rules(flow->rule);
        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
                              struct mlx5e_tc_flow *flow);

static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct net_device *out_dev, *encap_dev = NULL;
        struct mlx5_flow_handle *rule = NULL;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *out_priv;
        int err;

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
                out_dev = __dev_get_by_index(dev_net(priv->netdev),
                                             attr->parse_attr->mirred_ifindex);
                err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
                                         out_dev, &encap_dev, flow);
                if (err) {
                        rule = ERR_PTR(err);
                        if (err != -EAGAIN)
                                goto err_attach_encap;
                }
                out_priv = netdev_priv(encap_dev);
                rpriv = out_priv->ppriv;
                attr->out_rep = rpriv->rep;
        }

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err) {
                rule = ERR_PTR(err);
                goto err_add_vlan;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                kfree(parse_attr->mod_hdr_actions);
                if (err) {
                        rule = ERR_PTR(err);
                        goto err_mod_hdr;
                }
        }

        /* We get here either when (1) there was no error so far (rule is
         * NULL) or (2) there is an encap action and attaching the encap
         * returned -EAGAIN (no valid neighbour yet).
         */
        if (rule != ERR_PTR(-EAGAIN)) {
                rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
                if (IS_ERR(rule))
                        goto err_add_rule;
        }
        return rule;

err_add_rule:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
        mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                mlx5e_detach_encap(priv, flow);
err_attach_encap:
        return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;

        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
                mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
        }

        mlx5_eswitch_del_vlan_action(esw, attr);

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
                mlx5e_detach_encap(priv, flow);
                kvfree(attr->parse_attr);
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *esw_attr;
        struct mlx5e_tc_flow *flow;
        int err;

        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               e->encap_size, e->encap_header,
                               &e->encap_id);
        if (err) {
                mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
                               err);
                return;
        }
        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(flow, &e->flows, encap) {
                esw_attr = flow->esw_attr;
                esw_attr->encap_id = e->encap_id;
                flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
                if (IS_ERR(flow->rule)) {
                        err = PTR_ERR(flow->rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                       err);
                        continue;
                }
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
        }
}
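
/* Illustrative note: mlx5e_tc_encap_flows_add() above is the "add" half
 * of the neigh-update handling.  Once a neighbour becomes valid, the
 * cached encap header is pushed to the firmware and every flow parked on
 * e->flows is (re)offloaded with the fresh encap_id; flows whose rule
 * insertion fails stay software-only but remain queued for the next
 * update.
 */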

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_flow *flow;

        list_for_each_entry(flow, &e->flows, encap) {
                if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
                        mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
                }
        }

        if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
                e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
                mlx5_encap_dealloc(priv->mdev, e->encap_id);
        }
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
        u64 bytes, packets, lastuse = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5e_encap_entry *e;
        struct mlx5_fc *counter;
        struct neigh_table *tbl;
        bool neigh_used = false;
        struct neighbour *n;

        if (m_neigh->family == AF_INET)
                tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
        else if (m_neigh->family == AF_INET6)
                tbl = ipv6_stub->nd_tbl;
#endif
        else
                return;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
                        continue;
                list_for_each_entry(flow, &e->flows, encap) {
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                                counter = mlx5_flow_rule_counter(flow->rule);
                                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
                                        break;
                                }
                        }
                }
        }

        if (neigh_used) {
                nhe->reported_lastuse = jiffies;

                /* find the relevant neigh according to the cached device and
                 * dst ip pair
                 */
                n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
                if (!n) {
                        WARN(1, "The neighbour was already freed\n");
                        return;
                }

                neigh_event_send(n, NULL);
                neigh_release(n);
        }
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->encap.next;

        list_del(&flow->encap);
        if (list_empty(next)) {
                struct mlx5e_encap_entry *e;

                e = list_entry(next, struct mlx5e_encap_entry, flows);
                mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);

                hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
                kfree(e);
        }
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                mlx5e_tc_del_fdb_flow(priv, flow);
        else
                mlx5e_tc_del_nic_flow(priv, flow);
}

static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
}

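/* Illustrative example of a filter that exercises the tunnel parsers
 * (iproute2 syntax, may vary by version; the device names are
 * hypothetical):
 *
 *   tc filter add dev vxlan0 protocol ip ingress flower \
 *       enc_key_id 100 enc_dst_ip 10.0.0.1 enc_dst_port 4789 \
 *       action tunnel_key unset \
 *       action mirred egress redirect dev eth0_rep
 *
 * enc_key_id becomes the vxlan_vni match set in parse_vxlan_attr() above;
 * the outer UDP port and IP addresses are matched in parse_tunnel_attr()
 * below.
 */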
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);

        struct flow_dissector_key_control *enc_control =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                          f->key);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);
                struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
                struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
                struct mlx5e_priv *up_priv = netdev_priv(up_dev);

                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        goto vxlan_match_offload_err;

                if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                        parse_vxlan_attr(spec, f);
                } else {
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
                }

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_sport, ntohs(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
vxlan_match_offload_err:
                netdev_warn(priv->netdev,
                            "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
                return -EOPNOTSUPP;
        }

        if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->mask);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(key->src));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
        } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
        }

        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
         */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dmac_47_16), priv->netdev->dev_addr);

        /* let software handle IP fragments */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

        return 0;
}

static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              u8 *min_inline)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        *min_inline = MLX5_INLINE_MODE_L2;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_TCP) |
              BIT(FLOW_DISSECTOR_KEY_IP))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if ((dissector_uses_key(f->dissector,
                                FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
            dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  f->key);
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                /* In decap flows, the header pointers should point to the
                 * inner headers; the outer headers were already set by
                 * parse_tunnel_attr().
                 */
                headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                         inner_headers);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);

                struct flow_dissector_key_control *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->mask);
                addr_type = key->addr_type;

                if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 key->flags & FLOW_DIS_IS_FRAGMENT);

                        /* the HW doesn't need L3 inline to match on frag=no */
                        if (key->flags & FLOW_DIS_IS_FRAGMENT)
                                *min_inline = MLX5_INLINE_MODE_IP;
                }
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);

                if (mask->ip_proto)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id || mask->vlan_priority) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));

                if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
                    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_dissector_key_ip *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IP,
                                                  f->key);
                struct flow_dissector_key_ip *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IP,
                                                  f->mask);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

                if (mask->ttl &&
                    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
                                                ft_field_support.outer_ipv4_ttl))
                        return -EOPNOTSUPP;

                if (mask->tos || mask->ttl)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_TCP_UDP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_dissector_key_tcp *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_TCP,
                                                  f->key);
                struct flow_dissector_key_tcp *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_TCP,
                                                  f->mask);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
                         ntohs(mask->flags));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
                         ntohs(key->flags));

                if (mask->flags)
                        *min_inline = MLX5_INLINE_MODE_TCP_UDP;
        }

        return 0;
}

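/* Note: __parse_cls_flower() raises *min_inline to the deepest header the
 * HW must be able to inspect for the match: L2 by default, IP for L3
 * matches (addresses, ip_proto, tos/ttl, frag) and TCP_UDP for L4 port
 * or flag matches.  parse_cls_flower() below rejects the flow when the
 * eswitch is configured with a lower inline mode.
 */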
static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5e_tc_flow *flow,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;
        u8 min_inline;
        int err;

        err = __parse_cls_flower(priv, spec, f, &min_inline);

        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
                rep = rpriv->rep;
                if (rep->vport != FDB_UPLINK_VPORT &&
                    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
                    esw->offloads.inline_mode < min_inline)) {
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    min_inline, esw->offloads.inline_mode);
                        return -EOPNOTSUPP;
                }
        }

        return err;
}

struct pedit_headers {
        struct ethhdr  eth;
        struct iphdr   ip4;
        struct ipv6hdr ip6;
        struct tcphdr  tcp;
        struct udphdr  udp;
};

static int pedit_header_offsets[] = {
        [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
                         struct pedit_headers *masks,
                         struct pedit_headers *vals)
{
        u32 *curr_pmask, *curr_pval;

        if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
                goto out_err;

        curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
        curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

        if (*curr_pmask & mask)  /* disallow acting twice on the same location */
                goto out_err;

        *curr_pmask |= mask;
        *curr_pval  |= (val & mask);

        return 0;

out_err:
        return -EOPNOTSUPP;
}
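
/* Illustrative example: for a pedit key such as "pedit ex munge ip ttl
 * set 64", the caller passes hdr_type = TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 and
 * an offset inside struct iphdr; pedit_header() then resolves to
 * &masks->ip4 / &vals->ip4 and the 32-bit mask/value pair is accumulated
 * there.  Touching the same bits twice is rejected, so the offload pass
 * later sees every field at most once.
 */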

struct mlx5_fields {
        u8  field;
        u8  size;
        u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
                {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
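/* For example, OFFLOAD(SIPV4, 4, ip4.saddr, 0) expands to
 * {MLX5_ACTION_IN_FIELD_OUT_SIPV4, 4, offsetof(struct pedit_headers, ip4.saddr) + (0)},
 * tying a HW rewrite field id to the matching spot in struct pedit_headers.
 */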

static struct mlx5_fields fields[] = {
        OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
        OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
        OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
        OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
        OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

        OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
        OFFLOAD(SIPV4,  4, ip4.saddr, 0),
        OFFLOAD(DIPV4,  4, ip4.daddr, 0),

        OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
        OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
        OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
        OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
        OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
        OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
        OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
        OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
        OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

        OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
        OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
        OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

        OFFLOAD(UDP_SPORT, 2, udp.source, 0),
        OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};

/* On input, parse_attr->num_mod_hdr_actions tells how many HW actions can
 * be parsed at most from the SW pedit action. On success, it holds the
 * number of HW actions that were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
                                struct pedit_headers *vals,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
        int i, action_size, nactions, max_actions, first, last, next_z;
        void *s_masks_p, *a_masks_p, *vals_p;
        struct mlx5_fields *f;
        u8 cmd, field_bsize;
        u32 s_mask, a_mask;
        unsigned long mask;
        __be32 mask_be32;
        __be16 mask_be16;
        void *action;

        set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
        add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
        set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
        add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
        action = parse_attr->mod_hdr_actions;
        max_actions = parse_attr->num_mod_hdr_actions;
        nactions = 0;

        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                f = &fields[i];
                /* avoid seeing bits set from previous iterations */
                s_mask = 0;
                a_mask = 0;

                s_masks_p = (void *)set_masks + f->offset;
                a_masks_p = (void *)add_masks + f->offset;

                memcpy(&s_mask, s_masks_p, f->size);
                memcpy(&a_mask, a_masks_p, f->size);

                if (!s_mask && !a_mask) /* nothing to offload here */
                        continue;

                if (s_mask && a_mask) {
                        printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
                        return -EOPNOTSUPP;
                }

                if (nactions == max_actions) {
                        printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
                        return -EOPNOTSUPP;
                }

                if (s_mask) {
                        cmd  = MLX5_ACTION_TYPE_SET;
                        mask = s_mask;
                        vals_p = (void *)set_vals + f->offset;
                        /* clear to denote we consumed this field */
                        memset(s_masks_p, 0, f->size);
                } else {
                        cmd  = MLX5_ACTION_TYPE_ADD;
                        mask = a_mask;
                        vals_p = (void *)add_vals + f->offset;
                        /* clear to denote we consumed this field */
                        memset(a_masks_p, 0, f->size);
                }

                field_bsize = f->size * BITS_PER_BYTE;

                if (field_bsize == 32) {
                        mask_be32 = *(__be32 *)&mask;
                        mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
                } else if (field_bsize == 16) {
                        mask_be16 = *(__be16 *)&mask;
                        mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
                }

                first = find_first_bit(&mask, field_bsize);
                next_z = find_next_zero_bit(&mask, field_bsize, first);
                last  = find_last_bit(&mask, field_bsize);
                if (first < next_z && next_z < last) {
                        printk(KERN_WARNING "mlx5: rewrite of non-contiguous sub-fields (mask %lx) isn't offloaded\n",
1218                                mask);
1219                         return -EOPNOTSUPP;
1220                 }
1221
1222                 MLX5_SET(set_action_in, action, action_type, cmd);
1223                 MLX5_SET(set_action_in, action, field, f->field);
1224
1225                 if (cmd == MLX5_ACTION_TYPE_SET) {
1226                         MLX5_SET(set_action_in, action, offset, first);
1227                         /* length is num of bits to be written, zero means length of 32 */
1228                         MLX5_SET(set_action_in, action, length, (last - first + 1));
1229                 }
1230
1231                 if (field_bsize == 32)
1232                         MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
1233                 else if (field_bsize == 16)
1234                         MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
1235                 else if (field_bsize == 8)
1236                         MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
1237
1238                 action += action_size;
1239                 nactions++;
1240         }
1241
1242         parse_attr->num_mod_hdr_actions = nactions;
1243         return 0;
1244 }
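
/* Worked example for the field loop above (a sketch, device names
 * hypothetical): a rule such as
 *
 *   tc filter add dev ens1f0 parent ffff: protocol ip flower ip_proto tcp \
 *       action pedit ex munge ip ttl set 63 pipe \
 *       action mirred egress redirect dev ens1f0_0
 *
 * arrives as one 32-bit SET key; the loop finds the 1-byte ttl field with
 * mask 0xff, verifies the set bits are contiguous, and emits a single
 * MLX5_ACTION_TYPE_SET action with offset 0 and length 8 carrying the
 * value 63.
 */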
1245
1246 static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1247                                  const struct tc_action *a, int namespace,
1248                                  struct mlx5e_tc_flow_parse_attr *parse_attr)
1249 {
1250         int nkeys, action_size, max_actions;
1251
1252         nkeys = tcf_pedit_nkeys(a);
1253         action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1254
1255         if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
1256                 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
1257         else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
1258                 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
1259
1260         /* a single 32-bit pedit SW key can map to as many as 16 HW actions */
1261         max_actions = min(max_actions, nkeys * 16);
1262
1263         parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
1264         if (!parse_attr->mod_hdr_actions)
1265                 return -ENOMEM;
1266
1267         parse_attr->num_mod_hdr_actions = max_actions;
1268         return 0;
1269 }
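
/* Sizing note: a single 32-bit pedit SW key may straddle several packed HW
 * fields (e.g. one 32-bit write over the IPv4 ttl/protocol/checksum word),
 * so the allocation above conservatively budgets up to 16 HW actions per
 * SW key, bounded by the firmware's max_modify_header_actions capability.
 */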
1270
1271 static const struct pedit_headers zero_masks = {};
1272
1273 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1274                                  const struct tc_action *a, int namespace,
1275                                  struct mlx5e_tc_flow_parse_attr *parse_attr)
1276 {
1277         struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
1278         int nkeys, i, err = -EOPNOTSUPP;
1279         u32 mask, val, offset;
1280         u8 cmd, htype;
1281
1282         nkeys = tcf_pedit_nkeys(a);
1283
1284         memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1285         memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1286
1287         for (i = 0; i < nkeys; i++) {
1288                 htype = tcf_pedit_htype(a, i);
1289                 cmd = tcf_pedit_cmd(a, i);
1290                 err = -EOPNOTSUPP; /* can't be all optimistic */
1291
1292                 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
1293                         printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
1294                         goto out_err;
1295                 }
1296
1297                 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
1298                         printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
1299                         goto out_err;
1300                 }
1301
1302                 mask = tcf_pedit_mask(a, i);
1303                 val = tcf_pedit_val(a, i);
1304                 offset = tcf_pedit_offset(a, i);
1305
1306                 err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
1307                 if (err)
1308                         goto out_err;
1309         }
1310
1311         err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1312         if (err)
1313                 goto out_err;
1314
1315         err = offload_pedit_fields(masks, vals, parse_attr);
1316         if (err < 0)
1317                 goto out_dealloc_parsed_actions;
1318
1319         for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
1320                 cmd_masks = &masks[cmd];
1321                 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
1322                         printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
1323                                cmd);
1324                         print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
1325                                        16, 1, cmd_masks, sizeof(zero_masks), true);
1326                         err = -EOPNOTSUPP;
1327                         goto out_dealloc_parsed_actions;
1328                 }
1329         }
1330
1331         return 0;
1332
1333 out_dealloc_parsed_actions:
1334         kfree(parse_attr->mod_hdr_actions);
1335 out_err:
1336         return err;
1337 }
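
/* Usage sketch (hypothetical netdev names): a filter such as
 *
 *   tc filter add dev ens1f0 parent ffff: protocol ip flower \
 *       action pedit ex munge eth dst set 20:22:33:44:55:66 \
 *           munge ip ttl set 64 pipe \
 *       action mirred egress redirect dev ens1f0_0
 *
 * accumulates both munges into masks[SET]/vals[SET]; after
 * offload_pedit_fields() consumes the fields it recognizes, any mask bits
 * still set denote a field the HW can't rewrite and fail the offload.
 */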
1338
1339 static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
1340 {
1341         u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
1342                          TCA_CSUM_UPDATE_FLAG_UDP;
1343
1344         /* the HW recalculates checksums only when it is also rewriting headers */
1345         if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
1346                 netdev_warn(priv->netdev,
1347                             "TC csum action is only offloaded with pedit\n");
1348                 return false;
1349         }
1350
1351         if (update_flags & ~prot_flags) {
1352                 netdev_warn(priv->netdev,
1353                             "can't offload TC csum action for some header/s - flags %#x\n",
1354                             update_flags);
1355                 return false;
1356         }
1357
1358         return true;
1359 }
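
/* A csum action passes the check above only when it follows a pedit in the
 * same rule, e.g. (sketch, hypothetical names):
 *
 *   ... action pedit ex munge ip ttl set 63 pipe action csum ip4h pipe \
 *       action mirred egress redirect dev ens1f0_0
 *
 * since the HW recalculates IPv4/TCP/UDP checksums as a side effect of the
 * header rewrite rather than as a standalone action.
 */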
1360
1361 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1362                                           struct tcf_exts *exts)
1363 {
1364         const struct tc_action *a;
1365         bool modify_ip_header;
1366         LIST_HEAD(actions);
1367         u8 htype, ip_proto;
1368         void *headers_v;
1369         u16 ethertype;
1370         int nkeys, i;
1371
1372         headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
1373         ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
1374
1375         /* for non-IP we only re-write MACs, so we're okay */
1376         if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
1377                 goto out_ok;
1378
1379         modify_ip_header = false;
1380         tcf_exts_to_list(exts, &actions);
1381         list_for_each_entry(a, &actions, list) {
1382                 if (!is_tcf_pedit(a))
1383                         continue;
1384
1385                 nkeys = tcf_pedit_nkeys(a);
1386                 for (i = 0; i < nkeys; i++) {
1387                         htype = tcf_pedit_htype(a, i);
1388                         if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
1389                             htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
1390                                 modify_ip_header = true;
1391                                 break;
1392                         }
1393                 }
1394         }
1395
1396         ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1397         if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
1398                 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
1399                 return false;
1400         }
1401
1402 out_ok:
1403         return true;
1404 }
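
/* What the check above rejects, sketched: rewriting an IP header field on,
 * say, an ICMP flow. The HW fixes up L4 checksums only for TCP and UDP, so
 * an IP-header rewrite on any other ip_proto could emit packets with stale
 * checksums and is refused up front.
 */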
1405
1406 static bool actions_match_supported(struct mlx5e_priv *priv,
1407                                     struct tcf_exts *exts,
1408                                     struct mlx5e_tc_flow_parse_attr *parse_attr,
1409                                     struct mlx5e_tc_flow *flow)
1410 {
1411         u32 actions;
1412
1413         if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1414                 actions = flow->esw_attr->action;
1415         else
1416                 actions = flow->nic_attr->action;
1417
1418         if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1419                 return modify_header_match_supported(&parse_attr->spec, exts);
1420
1421         return true;
1422 }
1423
1424 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1425                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
1426                                 struct mlx5e_tc_flow *flow)
1427 {
1428         struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1429         const struct tc_action *a;
1430         LIST_HEAD(actions);
1431         int err;
1432
1433         if (!tcf_exts_has_actions(exts))
1434                 return -EINVAL;
1435
1436         attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
1437         attr->action = 0;
1438
1439         tcf_exts_to_list(exts, &actions);
1440         list_for_each_entry(a, &actions, list) {
1441                 if (is_tcf_gact_shot(a)) {
1442                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1443                         if (MLX5_CAP_FLOWTABLE(priv->mdev,
1444                                                flow_table_properties_nic_receive.flow_counter))
1445                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1446                         continue;
1447                 }
1448
1449                 if (is_tcf_pedit(a)) {
1450                         err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
1451                                                     parse_attr);
1452                         if (err)
1453                                 return err;
1454
1455                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1456                                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1457                         continue;
1458                 }
1459
1460                 if (is_tcf_csum(a)) {
1461                         if (csum_offload_supported(priv, attr->action,
1462                                                    tcf_csum_update_flags(a)))
1463                                 continue;
1464
1465                         return -EOPNOTSUPP;
1466                 }
1467
1468                 if (is_tcf_skbedit_mark(a)) {
1469                         u32 mark = tcf_skbedit_mark(a);
1470
1471                         if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
1472                                 netdev_warn(priv->netdev, "Bad flow mark - only 16 bits are supported: 0x%x\n",
1473                                             mark);
1474                                 return -EINVAL;
1475                         }
1476
1477                         attr->flow_tag = mark;
1478                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1479                         continue;
1480                 }
1481
1482                 return -EINVAL;
1483         }
1484
1485         if (!actions_match_supported(priv, exts, parse_attr, flow))
1486                 return -EOPNOTSUPP;
1487
1488         return 0;
1489 }
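
/* NIC-mode usage sketch (hypothetical device name): flow marking such as
 *
 *   tc filter add dev ens1f0 parent ffff: protocol ip flower \
 *       dst_ip 7.7.7.7 action skbedit mark 0x1234
 *
 * lands here; the mark is programmed as the flow tag, reported back in the
 * CQE and restored to skb->mark on receive, which is why marks wider than
 * 16 bits are rejected above.
 */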
1490
1491 static inline int cmp_encap_info(struct ip_tunnel_key *a,
1492                                  struct ip_tunnel_key *b)
1493 {
1494         return memcmp(a, b, sizeof(*a));
1495 }
1496
1497 static inline int hash_encap_info(struct ip_tunnel_key *key)
1498 {
1499         return jhash(key, sizeof(*key), 0);
1500 }
1501
1502 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
1503                                    struct net_device *mirred_dev,
1504                                    struct net_device **out_dev,
1505                                    struct flowi4 *fl4,
1506                                    struct neighbour **out_n,
1507                                    int *out_ttl)
1508 {
1509         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1510         struct rtable *rt;
1511         struct neighbour *n = NULL;
1512
1513 #if IS_ENABLED(CONFIG_INET)
1514         int ret;
1515
1516         rt = ip_route_output_key(dev_net(mirred_dev), fl4);
1517         ret = PTR_ERR_OR_ZERO(rt);
1518         if (ret)
1519                 return ret;
1520 #else
1521         return -EOPNOTSUPP;
1522 #endif
1523         /* if the egress device isn't on the same HW e-switch, we use the uplink */
1524         if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
1525                 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
1526         else
1527                 *out_dev = rt->dst.dev;
1528
1529         *out_ttl = ip4_dst_hoplimit(&rt->dst);
1530         n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
1531         ip_rt_put(rt);
1532         if (!n)
1533                 return -ENOMEM;
1534
1535         *out_n = n;
1536         return 0;
1537 }
1538
1539 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
1540                                    struct net_device *mirred_dev,
1541                                    struct net_device **out_dev,
1542                                    struct flowi6 *fl6,
1543                                    struct neighbour **out_n,
1544                                    int *out_ttl)
1545 {
1546         struct neighbour *n = NULL;
1547         struct dst_entry *dst;
1548
1549 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
1550         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1551         int ret;
1552
1553         ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
1554                                          fl6);
1555         if (ret < 0)
1556                 return ret;
1557
1558         *out_ttl = ip6_dst_hoplimit(dst);
1559
1560         /* if the egress device isn't on the same HW e-switch, we use the uplink */
1561         if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
1562                 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
1563         else
1564                 *out_dev = dst->dev;
1565 #else
1566         return -EOPNOTSUPP;
1567 #endif
1568
1569         n = dst_neigh_lookup(dst, &fl6->daddr);
1570         dst_release(dst);
1571         if (!n)
1572                 return -ENOMEM;
1573
1574         *out_n = n;
1575         return 0;
1576 }
1577
1578 static void gen_vxlan_header_ipv4(struct net_device *out_dev,
1579                                   char buf[], int encap_size,
1580                                   unsigned char h_dest[ETH_ALEN],
1581                                   int ttl,
1582                                   __be32 daddr,
1583                                   __be32 saddr,
1584                                   __be16 udp_dst_port,
1585                                   __be32 vx_vni)
1586 {
1587         struct ethhdr *eth = (struct ethhdr *)buf;
1588         struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
1589         struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
1590         struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1591
1592         memset(buf, 0, encap_size);
1593
1594         ether_addr_copy(eth->h_dest, h_dest);
1595         ether_addr_copy(eth->h_source, out_dev->dev_addr);
1596         eth->h_proto = htons(ETH_P_IP);
1597
1598         ip->daddr = daddr;
1599         ip->saddr = saddr;
1600
1601         ip->ttl = ttl;
1602         ip->protocol = IPPROTO_UDP;
1603         ip->version = 0x4;
1604         ip->ihl = 0x5;
1605
1606         udp->dest = udp_dst_port;
1607         vxh->vx_flags = VXLAN_HF_VNI;
1608         vxh->vx_vni = vxlan_vni_field(vx_vni);
1609 }
1610
1611 static void gen_vxlan_header_ipv6(struct net_device *out_dev,
1612                                   char buf[], int encap_size,
1613                                   unsigned char h_dest[ETH_ALEN],
1614                                   int ttl,
1615                                   struct in6_addr *daddr,
1616                                   struct in6_addr *saddr,
1617                                   __be16 udp_dst_port,
1618                                   __be32 vx_vni)
1619 {
1620         struct ethhdr *eth = (struct ethhdr *)buf;
1621         struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
1622         struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
1623         struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1624
1625         memset(buf, 0, encap_size);
1626
1627         ether_addr_copy(eth->h_dest, h_dest);
1628         ether_addr_copy(eth->h_source, out_dev->dev_addr);
1629         eth->h_proto = htons(ETH_P_IPV6);
1630
1631         ip6_flow_hdr(ip6h, 0, 0);
1632         /* the HW fills in the IPv6 payload length */
1633         ip6h->nexthdr     = IPPROTO_UDP;
1634         ip6h->hop_limit   = ttl;
1635         ip6h->daddr       = *daddr;
1636         ip6h->saddr       = *saddr;
1637
1638         udp->dest = udp_dst_port;
1639         vxh->vx_flags = VXLAN_HF_VNI;
1640         vxh->vx_vni = vxlan_vni_field(vx_vni);
1641 }
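
/* Layout produced by the two builders above (VXLAN_HLEN already includes
 * the UDP header):
 *
 *   IPv4: ETH(14) + IP(20)   + UDP(8) + VXLAN(8) = 50 bytes
 *   IPv6: ETH(14) + IPv6(40) + UDP(8) + VXLAN(8) = 70 bytes
 *
 * Fields left as zero here (IP total/payload length, UDP length and source
 * port, checksums) are expected to be filled in by the HW on transmit.
 */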
1642
1643 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1644                                           struct net_device *mirred_dev,
1645                                           struct mlx5e_encap_entry *e)
1646 {
1647         int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
1648         int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
1649         struct ip_tunnel_key *tun_key = &e->tun_info.key;
1650         struct net_device *out_dev;
1651         struct neighbour *n = NULL;
1652         struct flowi4 fl4 = {};
1653         char *encap_header;
1654         int ttl, err;
1655         u8 nud_state;
1656
1657         if (max_encap_size < ipv4_encap_size) {
1658                 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
1659                                ipv4_encap_size, max_encap_size);
1660                 return -EOPNOTSUPP;
1661         }
1662
1663         encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
1664         if (!encap_header)
1665                 return -ENOMEM;
1666
1667         switch (e->tunnel_type) {
1668         case MLX5_HEADER_TYPE_VXLAN:
1669                 fl4.flowi4_proto = IPPROTO_UDP;
1670                 fl4.fl4_dport = tun_key->tp_dst;
1671                 break;
1672         default:
1673                 err = -EOPNOTSUPP;
1674                 goto free_encap;
1675         }
1676         fl4.flowi4_tos = tun_key->tos;
1677         fl4.daddr = tun_key->u.ipv4.dst;
1678         fl4.saddr = tun_key->u.ipv4.src;
1679
1680         err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
1681                                       &fl4, &n, &ttl);
1682         if (err)
1683                 goto free_encap;
1684
1685         /* used by mlx5e_detach_encap to look up the corresponding
1686          * entry in the neigh hash table when a user deletes a rule
1687          */
1688         e->m_neigh.dev = n->dev;
1689         e->m_neigh.family = n->ops->family;
1690         memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
1691         e->out_dev = out_dev;
1692
1693         /* It's important to add the neigh to the hash table before checking
1694          * its validity state, so that if we get a notification about the
1695          * neigh changing its validity state, we will find the relevant neigh
1696          * in the hash.
1697          */
1698         err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1699         if (err)
1700                 goto free_encap;
1701
1702         read_lock_bh(&n->lock);
1703         nud_state = n->nud_state;
1704         ether_addr_copy(e->h_dest, n->ha);
1705         read_unlock_bh(&n->lock);
1706
1707         switch (e->tunnel_type) {
1708         case MLX5_HEADER_TYPE_VXLAN:
1709                 gen_vxlan_header_ipv4(out_dev, encap_header,
1710                                       ipv4_encap_size, e->h_dest, ttl,
1711                                       fl4.daddr,
1712                                       fl4.saddr, tun_key->tp_dst,
1713                                       tunnel_id_to_key32(tun_key->tun_id));
1714                 break;
1715         default:
1716                 err = -EOPNOTSUPP;
1717                 goto destroy_neigh_entry;
1718         }
1719         e->encap_size = ipv4_encap_size;
1720         e->encap_header = encap_header;
1721
1722         if (!(nud_state & NUD_VALID)) {
1723                 neigh_event_send(n, NULL);
1724                 err = -EAGAIN;
1725                 goto out;
1726         }
1727
1728         err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
1729                                ipv4_encap_size, encap_header, &e->encap_id);
1730         if (err)
1731                 goto destroy_neigh_entry;
1732
1733         e->flags |= MLX5_ENCAP_ENTRY_VALID;
1734         mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
1735         neigh_release(n);
1736         return err;
1737
1738 destroy_neigh_entry:
1739         mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1740 free_encap:
1741         kfree(encap_header);
1742 out:
1743         if (n)
1744                 neigh_release(n);
1745         return err;
1746 }
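
/* Note on the -EAGAIN path above: when the neighbour isn't NUD_VALID yet,
 * we kick resolution via neigh_event_send() and return -EAGAIN with the
 * encap entry attached but not marked MLX5_ENCAP_ENTRY_VALID; the flow is
 * offloaded later from the representor's neigh update handler, once the
 * destination MAC becomes known.
 */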
1747
1748 static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
1749                                           struct net_device *mirred_dev,
1750                                           struct mlx5e_encap_entry *e)
1751 {
1752         int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
1753         int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
1754         struct ip_tunnel_key *tun_key = &e->tun_info.key;
1755         struct net_device *out_dev;
1756         struct neighbour *n = NULL;
1757         struct flowi6 fl6 = {};
1758         char *encap_header;
1759         int err, ttl = 0;
1760         u8 nud_state;
1761
1762         if (max_encap_size < ipv6_encap_size) {
1763                 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
1764                                ipv6_encap_size, max_encap_size);
1765                 return -EOPNOTSUPP;
1766         }
1767
1768         encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
1769         if (!encap_header)
1770                 return -ENOMEM;
1771
1772         switch (e->tunnel_type) {
1773         case MLX5_HEADER_TYPE_VXLAN:
1774                 fl6.flowi6_proto = IPPROTO_UDP;
1775                 fl6.fl6_dport = tun_key->tp_dst;
1776                 break;
1777         default:
1778                 err = -EOPNOTSUPP;
1779                 goto free_encap;
1780         }
1781
1782         fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
1783         fl6.daddr = tun_key->u.ipv6.dst;
1784         fl6.saddr = tun_key->u.ipv6.src;
1785
1786         err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
1787                                       &fl6, &n, &ttl);
1788         if (err)
1789                 goto free_encap;
1790
1791         /* used by mlx5e_detach_encap to look up the corresponding
1792          * entry in the neigh hash table when a user deletes a rule
1793          */
1794         e->m_neigh.dev = n->dev;
1795         e->m_neigh.family = n->ops->family;
1796         memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
1797         e->out_dev = out_dev;
1798
1799         /* It's important to add the neigh to the hash table before checking
1800          * its validity state, so that if we get a notification about the
1801          * neigh changing its validity state, we will find the relevant neigh
1802          * in the hash.
1803          */
1804         err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1805         if (err)
1806                 goto free_encap;
1807
1808         read_lock_bh(&n->lock);
1809         nud_state = n->nud_state;
1810         ether_addr_copy(e->h_dest, n->ha);
1811         read_unlock_bh(&n->lock);
1812
1813         switch (e->tunnel_type) {
1814         case MLX5_HEADER_TYPE_VXLAN:
1815                 gen_vxlan_header_ipv6(out_dev, encap_header,
1816                                       ipv6_encap_size, e->h_dest, ttl,
1817                                       &fl6.daddr,
1818                                       &fl6.saddr, tun_key->tp_dst,
1819                                       tunnel_id_to_key32(tun_key->tun_id));
1820                 break;
1821         default:
1822                 err = -EOPNOTSUPP;
1823                 goto destroy_neigh_entry;
1824         }
1825
1826         e->encap_size = ipv6_encap_size;
1827         e->encap_header = encap_header;
1828
1829         if (!(nud_state & NUD_VALID)) {
1830                 neigh_event_send(n, NULL);
1831                 err = -EAGAIN;
1832                 goto out;
1833         }
1834
1835         err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
1836                                ipv6_encap_size, encap_header, &e->encap_id);
1837         if (err)
1838                 goto destroy_neigh_entry;
1839
1840         e->flags |= MLX5_ENCAP_ENTRY_VALID;
1841         mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
1842         neigh_release(n);
1843         return err;
1844
1845 destroy_neigh_entry:
1846         mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1847 free_encap:
1848         kfree(encap_header);
1849 out:
1850         if (n)
1851                 neigh_release(n);
1852         return err;
1853 }
1854
1855 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
1856                               struct ip_tunnel_info *tun_info,
1857                               struct net_device *mirred_dev,
1858                               struct net_device **encap_dev,
1859                               struct mlx5e_tc_flow *flow)
1860 {
1861         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1862         struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
1863         unsigned short family = ip_tunnel_info_af(tun_info);
1864         struct mlx5e_priv *up_priv = netdev_priv(up_dev);
1865         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1866         struct ip_tunnel_key *key = &tun_info->key;
1867         struct mlx5e_encap_entry *e;
1868         int tunnel_type, err = 0;
1869         uintptr_t hash_key;
1870         bool found = false;
1871
1872         /* udp dst port must be set */
1873         if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
1874                 goto vxlan_encap_offload_err;
1875
1876         /* setting udp src port isn't supported */
1877         if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
1878 vxlan_encap_offload_err:
1879                 netdev_warn(priv->netdev,
1880                             "must set udp dst port and not set udp src port\n");
1881                 return -EOPNOTSUPP;
1882         }
1883
1884         if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
1885             MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
1886                 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
1887         } else {
1888                 netdev_warn(priv->netdev,
1889                             "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
1890                 return -EOPNOTSUPP;
1891         }
1892
1893         hash_key = hash_encap_info(key);
1894
1895         hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
1896                                    encap_hlist, hash_key) {
1897                 if (!cmp_encap_info(&e->tun_info.key, key)) {
1898                         found = true;
1899                         break;
1900                 }
1901         }
1902
1903         /* an existing encap entry must still be checked for validity */
1904         if (found)
1905                 goto attach_flow;
1906
1907         e = kzalloc(sizeof(*e), GFP_KERNEL);
1908         if (!e)
1909                 return -ENOMEM;
1910
1911         e->tun_info = *tun_info;
1912         e->tunnel_type = tunnel_type;
1913         INIT_LIST_HEAD(&e->flows);
1914
1915         if (family == AF_INET)
1916                 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
1917         else if (family == AF_INET6)
1918                 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
1919
1920         if (err && err != -EAGAIN)
1921                 goto out_err;
1922
1923         hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
1924
1925 attach_flow:
1926         list_add(&flow->encap, &e->flows);
1927         *encap_dev = e->out_dev;
1928         if (e->flags & MLX5_ENCAP_ENTRY_VALID)
1929                 attr->encap_id = e->encap_id;
1930         else
1931                 err = -EAGAIN;
1932
1933         return err;
1934
1935 out_err:
1936         kfree(e);
1937         return err;
1938 }
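
/* Encap usage sketch (hypothetical names): a rule such as
 *
 *   tc filter add dev ens1f0_0 parent ffff: protocol ip flower \
 *       action tunnel_key set id 100 src_ip 1.1.1.1 dst_ip 2.2.2.2 \
 *           dst_port 4789 \
 *       action mirred egress redirect dev vxlan0
 *
 * reaches this function with tun_info carrying the tunnel key; flows that
 * share the same ip_tunnel_key reuse a single HW encap ID via the hash
 * table lookup above.
 */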
1939
1940 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1941                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
1942                                 struct mlx5e_tc_flow *flow)
1943 {
1944         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1945         struct mlx5e_rep_priv *rpriv = priv->ppriv;
1946         struct ip_tunnel_info *info = NULL;
1947         const struct tc_action *a;
1948         LIST_HEAD(actions);
1949         bool encap = false;
1950         int err = 0;
1951
1952         if (!tcf_exts_has_actions(exts))
1953                 return -EINVAL;
1954
1955         memset(attr, 0, sizeof(*attr));
1956         attr->in_rep = rpriv->rep;
1957
1958         tcf_exts_to_list(exts, &actions);
1959         list_for_each_entry(a, &actions, list) {
1960                 if (is_tcf_gact_shot(a)) {
1961                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
1962                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
1963                         continue;
1964                 }
1965
1966                 if (is_tcf_pedit(a)) {
1967                         err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
1968                                                     parse_attr);
1969                         if (err)
1970                                 return err;
1971
1972                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1973                         continue;
1974                 }
1975
1976                 if (is_tcf_csum(a)) {
1977                         if (csum_offload_supported(priv, attr->action,
1978                                                    tcf_csum_update_flags(a)))
1979                                 continue;
1980
1981                         return -EOPNOTSUPP;
1982                 }
1983
1984                 if (is_tcf_mirred_egress_redirect(a)) {
1985                         int ifindex = tcf_mirred_ifindex(a);
1986                         struct net_device *out_dev;
1987                         struct mlx5e_priv *out_priv;
1988
1989                         out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
1990
1991                         if (switchdev_port_same_parent_id(priv->netdev,
1992                                                           out_dev)) {
1993                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1994                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
1995                                 out_priv = netdev_priv(out_dev);
1996                                 rpriv = out_priv->ppriv;
1997                                 attr->out_rep = rpriv->rep;
1998                         } else if (encap) {
1999                                 parse_attr->mirred_ifindex = ifindex;
2000                                 parse_attr->tun_info = *info;
2001                                 attr->parse_attr = parse_attr;
2002                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
2003                                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2004                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
2005                                 /* attr->out_rep is resolved when we handle encap */
2006                         } else {
2007                                 pr_err("devices %s %s are not on the same switch HW, can't offload forwarding\n",
2008                                        priv->netdev->name, out_dev->name);
2009                                 return -EINVAL;
2010                         }
2011                         continue;
2012                 }
2013
2014                 if (is_tcf_tunnel_set(a)) {
2015                         info = tcf_tunnel_info(a);
2016                         if (info)
2017                                 encap = true;
2018                         else
2019                                 return -EOPNOTSUPP;
2020                         continue;
2021                 }
2022
2023                 if (is_tcf_vlan(a)) {
2024                         if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
2025                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2026                         } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
2027                                 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
2028                                         return -EOPNOTSUPP;
2029
2030                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
2031                                 attr->vlan = tcf_vlan_push_vid(a);
2032                         } else { /* action is TCA_VLAN_ACT_MODIFY */
2033                                 return -EOPNOTSUPP;
2034                         }
2035                         continue;
2036                 }
2037
2038                 if (is_tcf_tunnel_release(a)) {
2039                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2040                         continue;
2041                 }
2042
2043                 return -EINVAL;
2044         }
2045
2046         if (!actions_match_supported(priv, exts, parse_attr, flow))
2047                 return -EOPNOTSUPP;
2048
2049         return err;
2050 }
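
/* FDB usage sketch: forwarding between two representors with a VLAN push,
 * e.g. (hypothetical names)
 *
 *   tc filter add dev ens1f0_0 parent ffff: protocol ip flower \
 *       action vlan push id 10 \
 *       action mirred egress redirect dev ens1f0_1
 *
 * sets FWD_DEST | COUNT | VLAN_PUSH in attr->action; note that only 802.1Q
 * push is accepted above and TCA_VLAN_ACT_MODIFY is rejected outright.
 */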
2051
2052 int mlx5e_configure_flower(struct mlx5e_priv *priv,
2053                            struct tc_cls_flower_offload *f)
2054 {
2055         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2056         struct mlx5e_tc_flow_parse_attr *parse_attr;
2057         struct mlx5e_tc_table *tc = &priv->fs.tc;
2058         struct mlx5e_tc_flow *flow;
2059         int attr_size, err = 0;
2060         u8 flow_flags = 0;
2061
2062         if (esw && esw->mode == SRIOV_OFFLOADS) {
2063                 flow_flags = MLX5E_TC_FLOW_ESWITCH;
2064                 attr_size  = sizeof(struct mlx5_esw_flow_attr);
2065         } else {
2066                 flow_flags = MLX5E_TC_FLOW_NIC;
2067                 attr_size  = sizeof(struct mlx5_nic_flow_attr);
2068         }
2069
2070         flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
2071         parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
2072         if (!parse_attr || !flow) {
2073                 err = -ENOMEM;
2074                 goto err_free;
2075         }
2076
2077         flow->cookie = f->cookie;
2078         flow->flags = flow_flags;
2079
2080         err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
2081         if (err < 0)
2082                 goto err_free;
2083
2084         if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
2085                 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
2086                 if (err < 0)
2087                         goto err_free;
2088                 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
2089         } else {
2090                 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
2091                 if (err < 0)
2092                         goto err_free;
2093                 flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
2094         }
2095
2096         if (IS_ERR(flow->rule)) {
2097                 err = PTR_ERR(flow->rule);
2098                 if (err != -EAGAIN)
2099                         goto err_free;
2100         }
2101
2102         if (err != -EAGAIN)
2103                 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2104
2105         err = rhashtable_insert_fast(&tc->ht, &flow->node,
2106                                      tc->ht_params);
2107         if (err)
2108                 goto err_del_rule;
2109
2110         if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
2111             !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
2112                 kvfree(parse_attr);
2113         return err;
2114
2115 err_del_rule:
2116         mlx5e_tc_del_flow(priv, flow);
2117
2118 err_free:
2119         kvfree(parse_attr);
2120         kfree(flow);
2121         return err;
2122 }
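
/* parse_attr lifetime: for eswitch flows with an encap action, parse_attr
 * is kept reachable through attr->parse_attr so the rule can be offloaded
 * again from the neigh update path; in every other case it is freed once
 * the rule is in HW (or on error).
 */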
2123
2124 int mlx5e_delete_flower(struct mlx5e_priv *priv,
2125                         struct tc_cls_flower_offload *f)
2126 {
2127         struct mlx5e_tc_flow *flow;
2128         struct mlx5e_tc_table *tc = &priv->fs.tc;
2129
2130         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
2131                                       tc->ht_params);
2132         if (!flow)
2133                 return -EINVAL;
2134
2135         rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
2136
2137         mlx5e_tc_del_flow(priv, flow);
2138
2139         kfree(flow);
2140
2141         return 0;
2142 }
2143
2144 int mlx5e_stats_flower(struct mlx5e_priv *priv,
2145                        struct tc_cls_flower_offload *f)
2146 {
2147         struct mlx5e_tc_table *tc = &priv->fs.tc;
2148         struct mlx5e_tc_flow *flow;
2149         struct mlx5_fc *counter;
2150         u64 bytes;
2151         u64 packets;
2152         u64 lastuse;
2153
2154         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
2155                                       tc->ht_params);
2156         if (!flow)
2157                 return -EINVAL;
2158
2159         if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
2160                 return 0;
2161
2162         counter = mlx5_flow_rule_counter(flow->rule);
2163         if (!counter)
2164                 return 0;
2165
2166         mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
2167
2168         tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
2169
2170         return 0;
2171 }
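
/* Stats are served from the driver's counter cache (mlx5_fc_query_cached);
 * a periodic stats work item refreshes the cache from HW, so this callback
 * doesn't issue a firmware command per query.
 */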
2172
2173 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
2174         .head_offset = offsetof(struct mlx5e_tc_flow, node),
2175         .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
2176         .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
2177         .automatic_shrinking = true,
2178 };
2179
2180 int mlx5e_tc_init(struct mlx5e_priv *priv)
2181 {
2182         struct mlx5e_tc_table *tc = &priv->fs.tc;
2183
2184         hash_init(tc->mod_hdr_tbl);
2185
2186         tc->ht_params = mlx5e_tc_flow_ht_params;
2187         return rhashtable_init(&tc->ht, &tc->ht_params);
2188 }
2189
2190 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
2191 {
2192         struct mlx5e_tc_flow *flow = ptr;
2193         struct mlx5e_priv *priv = arg;
2194
2195         mlx5e_tc_del_flow(priv, flow);
2196         kfree(flow);
2197 }
2198
2199 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
2200 {
2201         struct mlx5e_tc_table *tc = &priv->fs.tc;
2202
2203         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
2204
2205         if (!IS_ERR_OR_NULL(tc->t)) {
2206                 mlx5_destroy_flow_table(tc->t);
2207                 tc->t = NULL;
2208         }
2209 }