net/mlx5e: Use vhca id as the hairpin peer identifier
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_tc.c
1 /*
2  * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32
33 #include <net/flow_dissector.h>
34 #include <net/sch_generic.h>
35 #include <net/pkt_cls.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_skbedit.h>
38 #include <linux/mlx5/fs.h>
39 #include <linux/mlx5/device.h>
40 #include <linux/rhashtable.h>
41 #include <net/switchdev.h>
42 #include <net/tc_act/tc_mirred.h>
43 #include <net/tc_act/tc_vlan.h>
44 #include <net/tc_act/tc_tunnel_key.h>
45 #include <net/tc_act/tc_pedit.h>
46 #include <net/tc_act/tc_csum.h>
47 #include <net/vxlan.h>
48 #include <net/arp.h>
49 #include "en.h"
50 #include "en_rep.h"
51 #include "en_tc.h"
52 #include "eswitch.h"
53 #include "vxlan.h"
54
55 struct mlx5_nic_flow_attr {
56         u32 action;
57         u32 flow_tag;
58         u32 mod_hdr_id;
59         u32 hairpin_tirn;
60 };
61
62 enum {
63         MLX5E_TC_FLOW_ESWITCH   = BIT(0),
64         MLX5E_TC_FLOW_NIC       = BIT(1),
65         MLX5E_TC_FLOW_OFFLOADED = BIT(2),
66         MLX5E_TC_FLOW_HAIRPIN   = BIT(3),
67 };
68
69 struct mlx5e_tc_flow {
70         struct rhash_head       node;
71         u64                     cookie;
72         u8                      flags;
73         struct mlx5_flow_handle *rule;
74         struct list_head        encap;   /* flows sharing the same encap ID */
75         struct list_head        mod_hdr; /* flows sharing the same mod hdr ID */
76         struct list_head        hairpin; /* flows sharing the same hairpin */
77         union {
78                 struct mlx5_esw_flow_attr esw_attr[0];
79                 struct mlx5_nic_flow_attr nic_attr[0];
80         };
81 };
82
83 struct mlx5e_tc_flow_parse_attr {
84         struct ip_tunnel_info tun_info;
85         struct mlx5_flow_spec spec;
86         int num_mod_hdr_actions;
87         void *mod_hdr_actions;
88         int mirred_ifindex;
89 };
90
91 enum {
92         MLX5_HEADER_TYPE_VXLAN = 0x0,
93         MLX5_HEADER_TYPE_NVGRE = 0x1,
94 };
95
96 #define MLX5E_TC_TABLE_NUM_GROUPS 4
97 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
98
99 struct mlx5e_hairpin {
100         struct mlx5_hairpin *pair;
101
102         struct mlx5_core_dev *func_mdev;
103         u32 tdn;
104         u32 tirn;
105 };
106
107 struct mlx5e_hairpin_entry {
108         /* a node of a hash table which keeps all the  hairpin entries */
109         struct hlist_node hairpin_hlist;
110
111         /* flows sharing the same hairpin */
112         struct list_head flows;
113
114         u16 peer_vhca_id;
115         struct mlx5e_hairpin *hp;
116 };
117
118 struct mod_hdr_key {
119         int num_actions;
120         void *actions;
121 };
122
123 struct mlx5e_mod_hdr_entry {
124         /* a node of a hash table which keeps all the mod_hdr entries */
125         struct hlist_node mod_hdr_hlist;
126
127         /* flows sharing the same mod_hdr entry */
128         struct list_head flows;
129
130         struct mod_hdr_key key;
131
132         u32 mod_hdr_id;
133 };
134
135 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
136
137 static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
138 {
139         return jhash(key->actions,
140                      key->num_actions * MLX5_MH_ACT_SZ, 0);
141 }
142
143 static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
144                                    struct mod_hdr_key *b)
145 {
146         if (a->num_actions != b->num_actions)
147                 return 1;
148
149         return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
150 }
151
152 static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
153                                 struct mlx5e_tc_flow *flow,
154                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
155 {
156         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
157         int num_actions, actions_size, namespace, err;
158         struct mlx5e_mod_hdr_entry *mh;
159         struct mod_hdr_key key;
160         bool found = false;
161         u32 hash_key;
162
163         num_actions  = parse_attr->num_mod_hdr_actions;
164         actions_size = MLX5_MH_ACT_SZ * num_actions;
165
166         key.actions = parse_attr->mod_hdr_actions;
167         key.num_actions = num_actions;
168
169         hash_key = hash_mod_hdr_info(&key);
170
171         if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
172                 namespace = MLX5_FLOW_NAMESPACE_FDB;
173                 hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
174                                        mod_hdr_hlist, hash_key) {
175                         if (!cmp_mod_hdr_info(&mh->key, &key)) {
176                                 found = true;
177                                 break;
178                         }
179                 }
180         } else {
181                 namespace = MLX5_FLOW_NAMESPACE_KERNEL;
182                 hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
183                                        mod_hdr_hlist, hash_key) {
184                         if (!cmp_mod_hdr_info(&mh->key, &key)) {
185                                 found = true;
186                                 break;
187                         }
188                 }
189         }
190
191         if (found)
192                 goto attach_flow;
193
194         mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
195         if (!mh)
196                 return -ENOMEM;
197
198         mh->key.actions = (void *)mh + sizeof(*mh);
199         memcpy(mh->key.actions, key.actions, actions_size);
200         mh->key.num_actions = num_actions;
201         INIT_LIST_HEAD(&mh->flows);
202
203         err = mlx5_modify_header_alloc(priv->mdev, namespace,
204                                        mh->key.num_actions,
205                                        mh->key.actions,
206                                        &mh->mod_hdr_id);
207         if (err)
208                 goto out_err;
209
210         if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
211                 hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
212         else
213                 hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
214
215 attach_flow:
216         list_add(&flow->mod_hdr, &mh->flows);
217         if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
218                 flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
219         else
220                 flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
221
222         return 0;
223
224 out_err:
225         kfree(mh);
226         return err;
227 }
228
229 static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
230                                  struct mlx5e_tc_flow *flow)
231 {
232         struct list_head *next = flow->mod_hdr.next;
233
234         list_del(&flow->mod_hdr);
235
236         if (list_empty(next)) {
237                 struct mlx5e_mod_hdr_entry *mh;
238
239                 mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
240
241                 mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
242                 hash_del(&mh->mod_hdr_hlist);
243                 kfree(mh);
244         }
245 }
246
247 static
248 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
249 {
250         struct net_device *netdev;
251         struct mlx5e_priv *priv;
252
253         netdev = __dev_get_by_index(net, ifindex);
254         priv = netdev_priv(netdev);
255         return priv->mdev;
256 }
257
258 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
259 {
260         u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
261         void *tirc;
262         int err;
263
264         err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
265         if (err)
266                 goto alloc_tdn_err;
267
268         tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
269
270         MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
271         MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn);
272         MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
273
274         err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
275         if (err)
276                 goto create_tir_err;
277
278         return 0;
279
280 create_tir_err:
281         mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
282 alloc_tdn_err:
283         return err;
284 }
285
286 static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
287 {
288         mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
289         mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
290 }
291
292 static struct mlx5e_hairpin *
293 mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
294                      int peer_ifindex)
295 {
296         struct mlx5_core_dev *func_mdev, *peer_mdev;
297         struct mlx5e_hairpin *hp;
298         struct mlx5_hairpin *pair;
299         int err;
300
301         hp = kzalloc(sizeof(*hp), GFP_KERNEL);
302         if (!hp)
303                 return ERR_PTR(-ENOMEM);
304
305         func_mdev = priv->mdev;
306         peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
307
308         pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
309         if (IS_ERR(pair)) {
310                 err = PTR_ERR(pair);
311                 goto create_pair_err;
312         }
313         hp->pair = pair;
314         hp->func_mdev = func_mdev;
315
316         err = mlx5e_hairpin_create_transport(hp);
317         if (err)
318                 goto create_transport_err;
319
320         return hp;
321
322 create_transport_err:
323         mlx5_core_hairpin_destroy(hp->pair);
324 create_pair_err:
325         kfree(hp);
326         return ERR_PTR(err);
327 }
328
329 static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
330 {
331         mlx5e_hairpin_destroy_transport(hp);
332         mlx5_core_hairpin_destroy(hp->pair);
333         kvfree(hp);
334 }
335
336 static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
337                                                      u16 peer_vhca_id)
338 {
339         struct mlx5e_hairpin_entry *hpe;
340
341         hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
342                                hairpin_hlist, peer_vhca_id) {
343                 if (hpe->peer_vhca_id == peer_vhca_id)
344                         return hpe;
345         }
346
347         return NULL;
348 }
349
350 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
351                                   struct mlx5e_tc_flow *flow,
352                                   struct mlx5e_tc_flow_parse_attr *parse_attr)
353 {
354         int peer_ifindex = parse_attr->mirred_ifindex;
355         struct mlx5_hairpin_params params;
356         struct mlx5_core_dev *peer_mdev;
357         struct mlx5e_hairpin_entry *hpe;
358         struct mlx5e_hairpin *hp;
359         u16 peer_id;
360         int err;
361
362         peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
363         if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
364                 netdev_warn(priv->netdev, "hairpin is not supported\n");
365                 return -EOPNOTSUPP;
366         }
367
368         peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
369         hpe = mlx5e_hairpin_get(priv, peer_id);
370         if (hpe)
371                 goto attach_flow;
372
373         hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
374         if (!hpe)
375                 return -ENOMEM;
376
377         INIT_LIST_HEAD(&hpe->flows);
378         hpe->peer_vhca_id = peer_id;
379
380         params.log_data_size = 15;
381         params.log_data_size = min_t(u8, params.log_data_size,
382                                      MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
383         params.log_data_size = max_t(u8, params.log_data_size,
384                                      MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
385         params.q_counter = priv->q_counter;
386
387         hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
388         if (IS_ERR(hp)) {
389                 err = PTR_ERR(hp);
390                 goto create_hairpin_err;
391         }
392
393         netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x log data size %d\n",
394                    hp->tirn, hp->pair->rqn, hp->pair->peer_mdev->priv.name,
395                    hp->pair->sqn, params.log_data_size);
396
397         hpe->hp = hp;
398         hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist, peer_id);
399
400 attach_flow:
401         flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
402         list_add(&flow->hairpin, &hpe->flows);
403         return 0;
404
405 create_hairpin_err:
406         kfree(hpe);
407         return err;
408 }
409
410 static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
411                                    struct mlx5e_tc_flow *flow)
412 {
413         struct list_head *next = flow->hairpin.next;
414
415         list_del(&flow->hairpin);
416
417         /* no more hairpin flows for us, release the hairpin pair */
418         if (list_empty(next)) {
419                 struct mlx5e_hairpin_entry *hpe;
420
421                 hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
422
423                 netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
424                            hpe->hp->pair->peer_mdev->priv.name);
425
426                 mlx5e_hairpin_destroy(hpe->hp);
427                 hash_del(&hpe->hairpin_hlist);
428                 kfree(hpe);
429         }
430 }
431
432 static struct mlx5_flow_handle *
433 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
434                       struct mlx5e_tc_flow_parse_attr *parse_attr,
435                       struct mlx5e_tc_flow *flow)
436 {
437         struct mlx5_nic_flow_attr *attr = flow->nic_attr;
438         struct mlx5_core_dev *dev = priv->mdev;
439         struct mlx5_flow_destination dest[2] = {};
440         struct mlx5_flow_act flow_act = {
441                 .action = attr->action,
442                 .flow_tag = attr->flow_tag,
443                 .encap_id = 0,
444         };
445         struct mlx5_fc *counter = NULL;
446         struct mlx5_flow_handle *rule;
447         bool table_created = false;
448         int err, dest_ix = 0;
449
450         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
451                 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
452                         err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
453                         if (err) {
454                                 rule = ERR_PTR(err);
455                                 goto err_add_hairpin_flow;
456                         }
457                         dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
458                         dest[dest_ix].tir_num = attr->hairpin_tirn;
459                 } else {
460                         dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
461                         dest[dest_ix].ft = priv->fs.vlan.ft.t;
462                 }
463                 dest_ix++;
464         }
465
466         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
467                 counter = mlx5_fc_create(dev, true);
468                 if (IS_ERR(counter)) {
469                         rule = ERR_CAST(counter);
470                         goto err_fc_create;
471                 }
472                 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
473                 dest[dest_ix].counter = counter;
474                 dest_ix++;
475         }
476
477         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
478                 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
479                 flow_act.modify_id = attr->mod_hdr_id;
480                 kfree(parse_attr->mod_hdr_actions);
481                 if (err) {
482                         rule = ERR_PTR(err);
483                         goto err_create_mod_hdr_id;
484                 }
485         }
486
487         if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
488                 int tc_grp_size, tc_tbl_size;
489                 u32 max_flow_counter;
490
491                 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
492                                     MLX5_CAP_GEN(dev, max_flow_counter_15_0);
493
494                 tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
495
496                 tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
497                                     BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
498
499                 priv->fs.tc.t =
500                         mlx5_create_auto_grouped_flow_table(priv->fs.ns,
501                                                             MLX5E_TC_PRIO,
502                                                             tc_tbl_size,
503                                                             MLX5E_TC_TABLE_NUM_GROUPS,
504                                                             0, 0);
505                 if (IS_ERR(priv->fs.tc.t)) {
506                         netdev_err(priv->netdev,
507                                    "Failed to create tc offload table\n");
508                         rule = ERR_CAST(priv->fs.tc.t);
509                         goto err_create_ft;
510                 }
511
512                 table_created = true;
513         }
514
515         parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
516         rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
517                                    &flow_act, dest, dest_ix);
518
519         if (IS_ERR(rule))
520                 goto err_add_rule;
521
522         return rule;
523
524 err_add_rule:
525         if (table_created) {
526                 mlx5_destroy_flow_table(priv->fs.tc.t);
527                 priv->fs.tc.t = NULL;
528         }
529 err_create_ft:
530         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
531                 mlx5e_detach_mod_hdr(priv, flow);
532 err_create_mod_hdr_id:
533         mlx5_fc_destroy(dev, counter);
534 err_fc_create:
535         if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
536                 mlx5e_hairpin_flow_del(priv, flow);
537 err_add_hairpin_flow:
538         return rule;
539 }
540
541 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
542                                   struct mlx5e_tc_flow *flow)
543 {
544         struct mlx5_nic_flow_attr *attr = flow->nic_attr;
545         struct mlx5_fc *counter = NULL;
546
547         counter = mlx5_flow_rule_counter(flow->rule);
548         mlx5_del_flow_rules(flow->rule);
549         mlx5_fc_destroy(priv->mdev, counter);
550
551         if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
552                 mlx5_destroy_flow_table(priv->fs.tc.t);
553                 priv->fs.tc.t = NULL;
554         }
555
556         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
557                 mlx5e_detach_mod_hdr(priv, flow);
558
559         if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
560                 mlx5e_hairpin_flow_del(priv, flow);
561 }
562
563 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
564                                struct mlx5e_tc_flow *flow);
565
566 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
567                               struct ip_tunnel_info *tun_info,
568                               struct net_device *mirred_dev,
569                               struct net_device **encap_dev,
570                               struct mlx5e_tc_flow *flow);
571
572 static struct mlx5_flow_handle *
573 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
574                       struct mlx5e_tc_flow_parse_attr *parse_attr,
575                       struct mlx5e_tc_flow *flow)
576 {
577         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
578         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
579         struct net_device *out_dev, *encap_dev = NULL;
580         struct mlx5_flow_handle *rule = NULL;
581         struct mlx5e_rep_priv *rpriv;
582         struct mlx5e_priv *out_priv;
583         int err;
584
585         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
586                 out_dev = __dev_get_by_index(dev_net(priv->netdev),
587                                              attr->parse_attr->mirred_ifindex);
588                 err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
589                                          out_dev, &encap_dev, flow);
590                 if (err) {
591                         rule = ERR_PTR(err);
592                         if (err != -EAGAIN)
593                                 goto err_attach_encap;
594                 }
595                 out_priv = netdev_priv(encap_dev);
596                 rpriv = out_priv->ppriv;
597                 attr->out_rep = rpriv->rep;
598         }
599
600         err = mlx5_eswitch_add_vlan_action(esw, attr);
601         if (err) {
602                 rule = ERR_PTR(err);
603                 goto err_add_vlan;
604         }
605
606         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
607                 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
608                 kfree(parse_attr->mod_hdr_actions);
609                 if (err) {
610                         rule = ERR_PTR(err);
611                         goto err_mod_hdr;
612                 }
613         }
614
615         /* we get here if (1) there's no error (rule being null) or when
616          * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
617          */
618         if (rule != ERR_PTR(-EAGAIN)) {
619                 rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
620                 if (IS_ERR(rule))
621                         goto err_add_rule;
622         }
623         return rule;
624
625 err_add_rule:
626         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
627                 mlx5e_detach_mod_hdr(priv, flow);
628 err_mod_hdr:
629         mlx5_eswitch_del_vlan_action(esw, attr);
630 err_add_vlan:
631         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
632                 mlx5e_detach_encap(priv, flow);
633 err_attach_encap:
634         return rule;
635 }
636
637 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
638                                   struct mlx5e_tc_flow *flow)
639 {
640         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
641         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
642
643         if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
644                 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
645                 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
646         }
647
648         mlx5_eswitch_del_vlan_action(esw, attr);
649
650         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
651                 mlx5e_detach_encap(priv, flow);
652                 kvfree(attr->parse_attr);
653         }
654
655         if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
656                 mlx5e_detach_mod_hdr(priv, flow);
657 }
658
659 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
660                               struct mlx5e_encap_entry *e)
661 {
662         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
663         struct mlx5_esw_flow_attr *esw_attr;
664         struct mlx5e_tc_flow *flow;
665         int err;
666
667         err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
668                                e->encap_size, e->encap_header,
669                                &e->encap_id);
670         if (err) {
671                 mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
672                                err);
673                 return;
674         }
675         e->flags |= MLX5_ENCAP_ENTRY_VALID;
676         mlx5e_rep_queue_neigh_stats_work(priv);
677
678         list_for_each_entry(flow, &e->flows, encap) {
679                 esw_attr = flow->esw_attr;
680                 esw_attr->encap_id = e->encap_id;
681                 flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
682                 if (IS_ERR(flow->rule)) {
683                         err = PTR_ERR(flow->rule);
684                         mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
685                                        err);
686                         continue;
687                 }
688                 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
689         }
690 }
691
692 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
693                               struct mlx5e_encap_entry *e)
694 {
695         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
696         struct mlx5e_tc_flow *flow;
697
698         list_for_each_entry(flow, &e->flows, encap) {
699                 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
700                         flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
701                         mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
702                 }
703         }
704
705         if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
706                 e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
707                 mlx5_encap_dealloc(priv->mdev, e->encap_id);
708         }
709 }
710
711 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
712 {
713         struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
714         u64 bytes, packets, lastuse = 0;
715         struct mlx5e_tc_flow *flow;
716         struct mlx5e_encap_entry *e;
717         struct mlx5_fc *counter;
718         struct neigh_table *tbl;
719         bool neigh_used = false;
720         struct neighbour *n;
721
722         if (m_neigh->family == AF_INET)
723                 tbl = &arp_tbl;
724 #if IS_ENABLED(CONFIG_IPV6)
725         else if (m_neigh->family == AF_INET6)
726                 tbl = ipv6_stub->nd_tbl;
727 #endif
728         else
729                 return;
730
731         list_for_each_entry(e, &nhe->encap_list, encap_list) {
732                 if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
733                         continue;
734                 list_for_each_entry(flow, &e->flows, encap) {
735                         if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
736                                 counter = mlx5_flow_rule_counter(flow->rule);
737                                 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
738                                 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
739                                         neigh_used = true;
740                                         break;
741                                 }
742                         }
743                 }
744         }
745
746         if (neigh_used) {
747                 nhe->reported_lastuse = jiffies;
748
749                 /* find the relevant neigh according to the cached device and
750                  * dst ip pair
751                  */
752                 n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
753                 if (!n) {
754                         WARN(1, "The neighbour already freed\n");
755                         return;
756                 }
757
758                 neigh_event_send(n, NULL);
759                 neigh_release(n);
760         }
761 }
762
763 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
764                                struct mlx5e_tc_flow *flow)
765 {
766         struct list_head *next = flow->encap.next;
767
768         list_del(&flow->encap);
769         if (list_empty(next)) {
770                 struct mlx5e_encap_entry *e;
771
772                 e = list_entry(next, struct mlx5e_encap_entry, flows);
773                 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
774
775                 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
776                         mlx5_encap_dealloc(priv->mdev, e->encap_id);
777
778                 hash_del_rcu(&e->encap_hlist);
779                 kfree(e->encap_header);
780                 kfree(e);
781         }
782 }
783
784 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
785                               struct mlx5e_tc_flow *flow)
786 {
787         if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
788                 mlx5e_tc_del_fdb_flow(priv, flow);
789         else
790                 mlx5e_tc_del_nic_flow(priv, flow);
791 }
792
793 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
794                              struct tc_cls_flower_offload *f)
795 {
796         void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
797                                        outer_headers);
798         void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
799                                        outer_headers);
800         void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
801                                     misc_parameters);
802         void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
803                                     misc_parameters);
804
805         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
806         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
807
808         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
809                 struct flow_dissector_key_keyid *key =
810                         skb_flow_dissector_target(f->dissector,
811                                                   FLOW_DISSECTOR_KEY_ENC_KEYID,
812                                                   f->key);
813                 struct flow_dissector_key_keyid *mask =
814                         skb_flow_dissector_target(f->dissector,
815                                                   FLOW_DISSECTOR_KEY_ENC_KEYID,
816                                                   f->mask);
817                 MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
818                          be32_to_cpu(mask->keyid));
819                 MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
820                          be32_to_cpu(key->keyid));
821         }
822 }
823
824 static int parse_tunnel_attr(struct mlx5e_priv *priv,
825                              struct mlx5_flow_spec *spec,
826                              struct tc_cls_flower_offload *f)
827 {
828         void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
829                                        outer_headers);
830         void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
831                                        outer_headers);
832
833         struct flow_dissector_key_control *enc_control =
834                 skb_flow_dissector_target(f->dissector,
835                                           FLOW_DISSECTOR_KEY_ENC_CONTROL,
836                                           f->key);
837
838         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
839                 struct flow_dissector_key_ports *key =
840                         skb_flow_dissector_target(f->dissector,
841                                                   FLOW_DISSECTOR_KEY_ENC_PORTS,
842                                                   f->key);
843                 struct flow_dissector_key_ports *mask =
844                         skb_flow_dissector_target(f->dissector,
845                                                   FLOW_DISSECTOR_KEY_ENC_PORTS,
846                                                   f->mask);
847                 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
848                 struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
849                 struct net_device *up_dev = uplink_rpriv->netdev;
850                 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
851
852                 /* Full udp dst port must be given */
853                 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
854                         goto vxlan_match_offload_err;
855
856                 if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
857                     MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
858                         parse_vxlan_attr(spec, f);
859                 else {
860                         netdev_warn(priv->netdev,
861                                     "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
862                         return -EOPNOTSUPP;
863                 }
864
865                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
866                          udp_dport, ntohs(mask->dst));
867                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
868                          udp_dport, ntohs(key->dst));
869
870                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
871                          udp_sport, ntohs(mask->src));
872                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
873                          udp_sport, ntohs(key->src));
874         } else { /* udp dst port must be given */
875 vxlan_match_offload_err:
876                 netdev_warn(priv->netdev,
877                             "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
878                 return -EOPNOTSUPP;
879         }
880
881         if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
882                 struct flow_dissector_key_ipv4_addrs *key =
883                         skb_flow_dissector_target(f->dissector,
884                                                   FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
885                                                   f->key);
886                 struct flow_dissector_key_ipv4_addrs *mask =
887                         skb_flow_dissector_target(f->dissector,
888                                                   FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
889                                                   f->mask);
890                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
891                          src_ipv4_src_ipv6.ipv4_layout.ipv4,
892                          ntohl(mask->src));
893                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
894                          src_ipv4_src_ipv6.ipv4_layout.ipv4,
895                          ntohl(key->src));
896
897                 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
898                          dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
899                          ntohl(mask->dst));
900                 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
901                          dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
902                          ntohl(key->dst));
903
904                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
905                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
906         } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
907                 struct flow_dissector_key_ipv6_addrs *key =
908                         skb_flow_dissector_target(f->dissector,
909                                                   FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
910                                                   f->key);
911                 struct flow_dissector_key_ipv6_addrs *mask =
912                         skb_flow_dissector_target(f->dissector,
913                                                   FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
914                                                   f->mask);
915
916                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
917                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
918                        &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
919                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
920                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
921                        &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
922
923                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
924                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
925                        &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
926                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
927                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
928                        &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
929
930                 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
931                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
932         }
933
934         /* Enforce DMAC when offloading incoming tunneled flows.
935          * Flow counters require a match on the DMAC.
936          */
937         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
938         MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
939         ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
940                                      dmac_47_16), priv->netdev->dev_addr);
941
942         /* let software handle IP fragments */
943         MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
944         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
945
946         return 0;
947 }
948
949 static int __parse_cls_flower(struct mlx5e_priv *priv,
950                               struct mlx5_flow_spec *spec,
951                               struct tc_cls_flower_offload *f,
952                               u8 *min_inline)
953 {
954         void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
955                                        outer_headers);
956         void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
957                                        outer_headers);
958         u16 addr_type = 0;
959         u8 ip_proto = 0;
960
961         *min_inline = MLX5_INLINE_MODE_L2;
962
963         if (f->dissector->used_keys &
964             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
965               BIT(FLOW_DISSECTOR_KEY_BASIC) |
966               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
967               BIT(FLOW_DISSECTOR_KEY_VLAN) |
968               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
969               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
970               BIT(FLOW_DISSECTOR_KEY_PORTS) |
971               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
972               BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
973               BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
974               BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
975               BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
976               BIT(FLOW_DISSECTOR_KEY_TCP) |
977               BIT(FLOW_DISSECTOR_KEY_IP))) {
978                 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
979                             f->dissector->used_keys);
980                 return -EOPNOTSUPP;
981         }
982
983         if ((dissector_uses_key(f->dissector,
984                                 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
985              dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
986              dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
987             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
988                 struct flow_dissector_key_control *key =
989                         skb_flow_dissector_target(f->dissector,
990                                                   FLOW_DISSECTOR_KEY_ENC_CONTROL,
991                                                   f->key);
992                 switch (key->addr_type) {
993                 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
994                 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
995                         if (parse_tunnel_attr(priv, spec, f))
996                                 return -EOPNOTSUPP;
997                         break;
998                 default:
999                         return -EOPNOTSUPP;
1000                 }
1001
1002                 /* In decap flow, header pointers should point to the inner
1003                  * headers, outer header were already set by parse_tunnel_attr
1004                  */
1005                 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1006                                          inner_headers);
1007                 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1008                                          inner_headers);
1009         }
1010
1011         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
1012                 struct flow_dissector_key_control *key =
1013                         skb_flow_dissector_target(f->dissector,
1014                                                   FLOW_DISSECTOR_KEY_CONTROL,
1015                                                   f->key);
1016
1017                 struct flow_dissector_key_control *mask =
1018                         skb_flow_dissector_target(f->dissector,
1019                                                   FLOW_DISSECTOR_KEY_CONTROL,
1020                                                   f->mask);
1021                 addr_type = key->addr_type;
1022
1023                 if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
1024                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1025                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
1026                                  key->flags & FLOW_DIS_IS_FRAGMENT);
1027
1028                         /* the HW doesn't need L3 inline to match on frag=no */
1029                         if (key->flags & FLOW_DIS_IS_FRAGMENT)
1030                                 *min_inline = MLX5_INLINE_MODE_IP;
1031                 }
1032         }
1033
1034         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1035                 struct flow_dissector_key_basic *key =
1036                         skb_flow_dissector_target(f->dissector,
1037                                                   FLOW_DISSECTOR_KEY_BASIC,
1038                                                   f->key);
1039                 struct flow_dissector_key_basic *mask =
1040                         skb_flow_dissector_target(f->dissector,
1041                                                   FLOW_DISSECTOR_KEY_BASIC,
1042                                                   f->mask);
1043                 ip_proto = key->ip_proto;
1044
1045                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1046                          ntohs(mask->n_proto));
1047                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1048                          ntohs(key->n_proto));
1049
1050                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
1051                          mask->ip_proto);
1052                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1053                          key->ip_proto);
1054
1055                 if (mask->ip_proto)
1056                         *min_inline = MLX5_INLINE_MODE_IP;
1057         }
1058
1059         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1060                 struct flow_dissector_key_eth_addrs *key =
1061                         skb_flow_dissector_target(f->dissector,
1062                                                   FLOW_DISSECTOR_KEY_ETH_ADDRS,
1063                                                   f->key);
1064                 struct flow_dissector_key_eth_addrs *mask =
1065                         skb_flow_dissector_target(f->dissector,
1066                                                   FLOW_DISSECTOR_KEY_ETH_ADDRS,
1067                                                   f->mask);
1068
1069                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1070                                              dmac_47_16),
1071                                 mask->dst);
1072                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1073                                              dmac_47_16),
1074                                 key->dst);
1075
1076                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1077                                              smac_47_16),
1078                                 mask->src);
1079                 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1080                                              smac_47_16),
1081                                 key->src);
1082         }
1083
1084         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
1085                 struct flow_dissector_key_vlan *key =
1086                         skb_flow_dissector_target(f->dissector,
1087                                                   FLOW_DISSECTOR_KEY_VLAN,
1088                                                   f->key);
1089                 struct flow_dissector_key_vlan *mask =
1090                         skb_flow_dissector_target(f->dissector,
1091                                                   FLOW_DISSECTOR_KEY_VLAN,
1092                                                   f->mask);
1093                 if (mask->vlan_id || mask->vlan_priority) {
1094                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1095                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
1096
1097                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
1098                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
1099
1100                         MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
1101                         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
1102                 }
1103         }
1104
1105         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1106                 struct flow_dissector_key_ipv4_addrs *key =
1107                         skb_flow_dissector_target(f->dissector,
1108                                                   FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1109                                                   f->key);
1110                 struct flow_dissector_key_ipv4_addrs *mask =
1111                         skb_flow_dissector_target(f->dissector,
1112                                                   FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1113                                                   f->mask);
1114
1115                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1116                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
1117                        &mask->src, sizeof(mask->src));
1118                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1119                                     src_ipv4_src_ipv6.ipv4_layout.ipv4),
1120                        &key->src, sizeof(key->src));
1121                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1122                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1123                        &mask->dst, sizeof(mask->dst));
1124                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1125                                     dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1126                        &key->dst, sizeof(key->dst));
1127
1128                 if (mask->src || mask->dst)
1129                         *min_inline = MLX5_INLINE_MODE_IP;
1130         }
1131
1132         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1133                 struct flow_dissector_key_ipv6_addrs *key =
1134                         skb_flow_dissector_target(f->dissector,
1135                                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1136                                                   f->key);
1137                 struct flow_dissector_key_ipv6_addrs *mask =
1138                         skb_flow_dissector_target(f->dissector,
1139                                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1140                                                   f->mask);
1141
1142                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1143                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
1144                        &mask->src, sizeof(mask->src));
1145                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1146                                     src_ipv4_src_ipv6.ipv6_layout.ipv6),
1147                        &key->src, sizeof(key->src));
1148
1149                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1150                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1151                        &mask->dst, sizeof(mask->dst));
1152                 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1153                                     dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1154                        &key->dst, sizeof(key->dst));
1155
1156                 if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
1157                     ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
1158                         *min_inline = MLX5_INLINE_MODE_IP;
1159         }
1160
1161         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
1162                 struct flow_dissector_key_ip *key =
1163                         skb_flow_dissector_target(f->dissector,
1164                                                   FLOW_DISSECTOR_KEY_IP,
1165                                                   f->key);
1166                 struct flow_dissector_key_ip *mask =
1167                         skb_flow_dissector_target(f->dissector,
1168                                                   FLOW_DISSECTOR_KEY_IP,
1169                                                   f->mask);
1170
1171                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1172                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1173
1174                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1175                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos  >> 2);
1176
1177                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1178                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
1179
1180                 if (mask->ttl &&
1181                     !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
1182                                                 ft_field_support.outer_ipv4_ttl))
1183                         return -EOPNOTSUPP;
1184
1185                 if (mask->tos || mask->ttl)
1186                         *min_inline = MLX5_INLINE_MODE_IP;
1187         }
1188
1189         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
1190                 struct flow_dissector_key_ports *key =
1191                         skb_flow_dissector_target(f->dissector,
1192                                                   FLOW_DISSECTOR_KEY_PORTS,
1193                                                   f->key);
1194                 struct flow_dissector_key_ports *mask =
1195                         skb_flow_dissector_target(f->dissector,
1196                                                   FLOW_DISSECTOR_KEY_PORTS,
1197                                                   f->mask);
1198                 switch (ip_proto) {
1199                 case IPPROTO_TCP:
1200                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1201                                  tcp_sport, ntohs(mask->src));
1202                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1203                                  tcp_sport, ntohs(key->src));
1204
1205                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1206                                  tcp_dport, ntohs(mask->dst));
1207                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1208                                  tcp_dport, ntohs(key->dst));
1209                         break;
1210
1211                 case IPPROTO_UDP:
1212                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1213                                  udp_sport, ntohs(mask->src));
1214                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1215                                  udp_sport, ntohs(key->src));
1216
1217                         MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1218                                  udp_dport, ntohs(mask->dst));
1219                         MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1220                                  udp_dport, ntohs(key->dst));
1221                         break;
1222                 default:
1223                         netdev_err(priv->netdev,
1224                                    "Only UDP and TCP transport are supported\n");
1225                         return -EINVAL;
1226                 }
1227
1228                 if (mask->src || mask->dst)
1229                         *min_inline = MLX5_INLINE_MODE_TCP_UDP;
1230         }
1231
1232         if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
1233                 struct flow_dissector_key_tcp *key =
1234                         skb_flow_dissector_target(f->dissector,
1235                                                   FLOW_DISSECTOR_KEY_TCP,
1236                                                   f->key);
1237                 struct flow_dissector_key_tcp *mask =
1238                         skb_flow_dissector_target(f->dissector,
1239                                                   FLOW_DISSECTOR_KEY_TCP,
1240                                                   f->mask);
1241
1242                 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
1243                          ntohs(mask->flags));
1244                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
1245                          ntohs(key->flags));
1246
1247                 if (mask->flags)
1248                         *min_inline = MLX5_INLINE_MODE_TCP_UDP;
1249         }
1250
1251         return 0;
1252 }
1253
1254 static int parse_cls_flower(struct mlx5e_priv *priv,
1255                             struct mlx5e_tc_flow *flow,
1256                             struct mlx5_flow_spec *spec,
1257                             struct tc_cls_flower_offload *f)
1258 {
1259         struct mlx5_core_dev *dev = priv->mdev;
1260         struct mlx5_eswitch *esw = dev->priv.eswitch;
1261         struct mlx5e_rep_priv *rpriv = priv->ppriv;
1262         struct mlx5_eswitch_rep *rep;
1263         u8 min_inline;
1264         int err;
1265
1266         err = __parse_cls_flower(priv, spec, f, &min_inline);
1267
1268         if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1269                 rep = rpriv->rep;
1270                 if (rep->vport != FDB_UPLINK_VPORT &&
1271                     (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
1272                     esw->offloads.inline_mode < min_inline)) {
1273                         netdev_warn(priv->netdev,
1274                                     "Flow is not offloaded due to min inline setting, required %d actual %d\n",
1275                                     min_inline, esw->offloads.inline_mode);
1276                         return -EOPNOTSUPP;
1277                 }
1278         }
1279
1280         return err;
1281 }
1282
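/* Shadow copy of every header type pedit can rewrite. One instance is kept
 * per pedit command (SET/ADD) and per role (mask/value); the parsed TC pedit
 * keys are accumulated into these before being translated into HW
 * modify-header actions.
 */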
1283 struct pedit_headers {
1284         struct ethhdr  eth;
1285         struct iphdr   ip4;
1286         struct ipv6hdr ip6;
1287         struct tcphdr  tcp;
1288         struct udphdr  udp;
1289 };
1290
1291 static int pedit_header_offsets[] = {
1292         [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1293         [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1294         [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1295         [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1296         [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
1297 };
1298
1299 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
1300
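/* Record a single 32-bit pedit key into the shadow headers: OR the mask and
 * the masked value into the target header type at @offset. Acting twice on
 * the same bits of the same location is rejected.
 */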
1301 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
1302                          struct pedit_headers *masks,
1303                          struct pedit_headers *vals)
1304 {
1305         u32 *curr_pmask, *curr_pval;
1306
1307         if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
1308                 goto out_err;
1309
1310         curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
1311         curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);
1312
1313         if (*curr_pmask & mask)  /* disallow acting twice on the same location */
1314                 goto out_err;
1315
1316         *curr_pmask |= mask;
1317         *curr_pval  |= (val & mask);
1318
1319         return 0;
1320
1321 out_err:
1322         return -EOPNOTSUPP;
1323 }
1324
1325 struct mlx5_fields {
1326         u8  field;
1327         u8  size;
1328         u32 offset;
1329 };
1330
1331 #define OFFLOAD(fw_field, size, field, off) \
1332                 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1333
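/* Map between HW modify-header fields and the corresponding offsets inside
 * struct pedit_headers. Note that the TCP flags byte is addressed as 5 bytes
 * past tcp.ack_seq, i.e. byte 13 of the TCP header, since struct tcphdr
 * exposes the flags as bitfields rather than as one named byte.
 */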
1334 static struct mlx5_fields fields[] = {
1335         OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1337         OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
1338         OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1339         OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
1340         OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),
1341
1342         OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
1343         OFFLOAD(SIPV4,  4, ip4.saddr, 0),
1344         OFFLOAD(DIPV4,  4, ip4.daddr, 0),
1345
1346         OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1347         OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
1348         OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
1349         OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
1350         OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1351         OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
1352         OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
1353         OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
1354         OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
1355
1356         OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
1357         OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
1358         OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1359
1360         OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1361         OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
1362 };
1363
1364 /* On input, parse_attr->num_mod_hdr_actions holds the maximum number of HW
1365  * actions that can be generated from the SW pedit keys. On success it is
1366  * updated to the number of HW actions actually emitted.
1367  */
1368 static int offload_pedit_fields(struct pedit_headers *masks,
1369                                 struct pedit_headers *vals,
1370                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
1371 {
1372         struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
1373         int i, action_size, nactions, max_actions, first, last, next_z;
1374         void *s_masks_p, *a_masks_p, *vals_p;
1375         struct mlx5_fields *f;
1376         u8 cmd, field_bsize;
1377         u32 s_mask, a_mask;
1378         unsigned long mask;
1379         __be32 mask_be32;
1380         __be16 mask_be16;
1381         void *action;
1382
1383         set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
1384         add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
1385         set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
1386         add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1387
1388         action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1389         action = parse_attr->mod_hdr_actions;
1390         max_actions = parse_attr->num_mod_hdr_actions;
1391         nactions = 0;
1392
1393         for (i = 0; i < ARRAY_SIZE(fields); i++) {
1394                 f = &fields[i];
1395                 /* avoid seeing bits set from previous iterations */
1396                 s_mask = 0;
1397                 a_mask = 0;
1398
1399                 s_masks_p = (void *)set_masks + f->offset;
1400                 a_masks_p = (void *)add_masks + f->offset;
1401
1402                 memcpy(&s_mask, s_masks_p, f->size);
1403                 memcpy(&a_mask, a_masks_p, f->size);
1404
1405                 if (!s_mask && !a_mask) /* nothing to offload here */
1406                         continue;
1407
1408                 if (s_mask && a_mask) {
1409                         printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1410                         return -EOPNOTSUPP;
1411                 }
1412
1413                 if (nactions == max_actions) {
1414                         printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1415                         return -EOPNOTSUPP;
1416                 }
1417
1418                 if (s_mask) {
1419                         cmd  = MLX5_ACTION_TYPE_SET;
1420                         mask = s_mask;
1421                         vals_p = (void *)set_vals + f->offset;
1422                         /* clear to denote we consumed this field */
1423                         memset(s_masks_p, 0, f->size);
1424                 } else {
1425                         cmd  = MLX5_ACTION_TYPE_ADD;
1426                         mask = a_mask;
1427                         vals_p = (void *)add_vals + f->offset;
1428                         /* clear to denote we consumed this field */
1429                         memset(a_masks_p, 0, f->size);
1430                 }
1431
1432                 field_bsize = f->size * BITS_PER_BYTE;
1433
1434                 if (field_bsize == 32) {
1435                         mask_be32 = *(__be32 *)&mask;
1436                         mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
1437                 } else if (field_bsize == 16) {
1438                         mask_be16 = *(__be16 *)&mask;
1439                         mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
1440                 }
1441
1442                 first = find_first_bit(&mask, field_bsize);
1443                 next_z = find_next_zero_bit(&mask, field_bsize, first);
1444                 last  = find_last_bit(&mask, field_bsize);
1445                 if (first < next_z && next_z < last) {
1446                         printk(KERN_WARNING "mlx5: rewrite of a few separate sub-fields (mask %lx) isn't offloaded\n",
1447                                mask);
1448                         return -EOPNOTSUPP;
1449                 }
1450
1451                 MLX5_SET(set_action_in, action, action_type, cmd);
1452                 MLX5_SET(set_action_in, action, field, f->field);
1453
1454                 if (cmd == MLX5_ACTION_TYPE_SET) {
1455                         MLX5_SET(set_action_in, action, offset, first);
1456                         /* length is num of bits to be written, zero means length of 32 */
1457                         MLX5_SET(set_action_in, action, length, (last - first + 1));
1458                 }
1459
1460                 if (field_bsize == 32)
1461                         MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
1462                 else if (field_bsize == 16)
1463                         MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
1464                 else if (field_bsize == 8)
1465                         MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
1466
1467                 action += action_size;
1468                 nactions++;
1469         }
1470
1471         parse_attr->num_mod_hdr_actions = nactions;
1472         return 0;
1473 }
1474
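/* Allocate room for the HW modify-header actions of one TC pedit action. The
 * upper bound is the device cap (FDB or NIC RX table, depending on the
 * namespace) clamped by 16 HW actions per SW pedit key.
 */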
1475 static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1476                                  const struct tc_action *a, int namespace,
1477                                  struct mlx5e_tc_flow_parse_attr *parse_attr)
1478 {
1479         int nkeys, action_size, max_actions;
1480
1481         nkeys = tcf_pedit_nkeys(a);
1482         action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1483
1484         if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
1485                 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
1486         else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
1487                 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
1488
1489         /* each 32-bit word of a pedit SW key can expand to as many as 16 HW actions */
1490         max_actions = min(max_actions, nkeys * 16);
1491
1492         parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
1493         if (!parse_attr->mod_hdr_actions)
1494                 return -ENOMEM;
1495
1496         parse_attr->num_mod_hdr_actions = max_actions;
1497         return 0;
1498 }
1499
1500 static const struct pedit_headers zero_masks = {};
1501
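/* Translate a TC pedit action into HW modify-header actions:
 *  1. accumulate all SET/ADD keys into the pedit_headers shadow structs
 *  2. allocate the HW action array
 *  3. convert the accumulated masks/values field by field
 *  4. fail if any mask bits were left unconsumed (unsupported field)
 *
 * For illustration only (assumed iproute2 syntax, not taken from this file;
 * $DEV and $DEV2 are placeholders), a rule exercising this path could look
 * roughly like:
 *   tc filter add dev $DEV ingress protocol ip flower ip_proto tcp \
 *      action pedit ex munge ip ttl set 63 \
 *      pipe action mirred egress redirect dev $DEV2
 */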
1502 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1503                                  const struct tc_action *a, int namespace,
1504                                  struct mlx5e_tc_flow_parse_attr *parse_attr)
1505 {
1506         struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
1507         int nkeys, i, err = -EOPNOTSUPP;
1508         u32 mask, val, offset;
1509         u8 cmd, htype;
1510
1511         nkeys = tcf_pedit_nkeys(a);
1512
1513         memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1514         memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1515
1516         for (i = 0; i < nkeys; i++) {
1517                 htype = tcf_pedit_htype(a, i);
1518                 cmd = tcf_pedit_cmd(a, i);
1519                 err = -EOPNOTSUPP; /* can't be all optimistic */
1520
1521                 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
1522                         printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
1523                         goto out_err;
1524                 }
1525
1526                 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
1527                         printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
1528                         goto out_err;
1529                 }
1530
1531                 mask = tcf_pedit_mask(a, i);
1532                 val = tcf_pedit_val(a, i);
1533                 offset = tcf_pedit_offset(a, i);
1534
1535                 err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
1536                 if (err)
1537                         goto out_err;
1538         }
1539
1540         err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1541         if (err)
1542                 goto out_err;
1543
1544         err = offload_pedit_fields(masks, vals, parse_attr);
1545         if (err < 0)
1546                 goto out_dealloc_parsed_actions;
1547
1548         for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
1549                 cmd_masks = &masks[cmd];
1550                 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
1551                         printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
1552                                cmd);
1553                         print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
1554                                        16, 1, cmd_masks, sizeof(zero_masks), true);
1555                         err = -EOPNOTSUPP;
1556                         goto out_dealloc_parsed_actions;
1557                 }
1558         }
1559
1560         return 0;
1561
1562 out_dealloc_parsed_actions:
1563         kfree(parse_attr->mod_hdr_actions);
1564 out_err:
1565         return err;
1566 }
1567
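/* The TC csum action is offloaded only as a companion to pedit (the HW
 * recalculates checksums as part of the header rewrite), and only for the
 * IPv4 header, TCP and UDP checksum update flags.
 */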
1568 static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
1569 {
1570         u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
1571                          TCA_CSUM_UPDATE_FLAG_UDP;
1572
1573         /* The HW recalculates checksums only when headers are being rewritten */
1574         if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
1575                 netdev_warn(priv->netdev,
1576                             "TC csum action is only offloaded with pedit\n");
1577                 return false;
1578         }
1579
1580         if (update_flags & ~prot_flags) {
1581                 netdev_warn(priv->netdev,
1582                             "can't offload TC csum action for some header/s - flags %#x\n",
1583                             update_flags);
1584                 return false;
1585         }
1586
1587         return true;
1588 }
1589
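/* Reject header rewrite of IP packets whose ip_proto is neither TCP nor UDP;
 * for non-IP ethertypes only MAC rewrite applies, which is always supported.
 */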
1590 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1591                                           struct tcf_exts *exts)
1592 {
1593         const struct tc_action *a;
1594         bool modify_ip_header;
1595         LIST_HEAD(actions);
1596         u8 htype, ip_proto;
1597         void *headers_v;
1598         u16 ethertype;
1599         int nkeys, i;
1600
1601         headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
1602         ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
1603
1604         /* for non-IP we only re-write MACs, so we're okay */
1605         if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
1606                 goto out_ok;
1607
1608         modify_ip_header = false;
1609         tcf_exts_to_list(exts, &actions);
1610         list_for_each_entry(a, &actions, list) {
1611                 if (!is_tcf_pedit(a))
1612                         continue;
1613
1614                 nkeys = tcf_pedit_nkeys(a);
1615                 for (i = 0; i < nkeys; i++) {
1616                         htype = tcf_pedit_htype(a, i);
1617                         if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
1618                             htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
1619                                 modify_ip_header = true;
1620                                 break;
1621                         }
1622                 }
1623         }
1624
1625         ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1626         if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
1627                 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
1628                 return false;
1629         }
1630
1631 out_ok:
1632         return true;
1633 }
1634
1635 static bool actions_match_supported(struct mlx5e_priv *priv,
1636                                     struct tcf_exts *exts,
1637                                     struct mlx5e_tc_flow_parse_attr *parse_attr,
1638                                     struct mlx5e_tc_flow *flow)
1639 {
1640         u32 actions;
1641
1642         if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1643                 actions = flow->esw_attr->action;
1644         else
1645                 actions = flow->nic_attr->action;
1646
1647         if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1648                 return modify_header_match_supported(&parse_attr->spec, exts);
1649
1650         return true;
1651 }
1652
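/* Two mlx5e netdevs are considered to belong to the same HW when their
 * underlying PCI functions share the same bus number and device (slot)
 * number; this is the condition for setting up hairpin forwarding between
 * them.
 */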
1653 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
1654 {
1655         struct mlx5_core_dev *fmdev, *pmdev;
1656         u16 func_id, peer_id;
1657
1658         fmdev = priv->mdev;
1659         pmdev = peer_priv->mdev;
1660
1661         func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
1662         peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
1663
1664         return (func_id == peer_id);
1665 }
1666
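/* Parse the TC actions of a NIC (non-eswitch) flow into attr->action and
 * attr->flow_tag. Supported actions: drop, pedit, csum (with pedit), skbedit
 * mark, and mirred redirect to another netdev on the same HW, which is set
 * up as a hairpin flow.
 *
 * Illustrative only (assumed iproute2 syntax; $NETDEV1/$NETDEV2 are
 * placeholders for two mlx5e netdevs that same_hw_devs() matches):
 *   tc filter add dev $NETDEV1 ingress protocol ip flower ip_proto udp \
 *      action mirred egress redirect dev $NETDEV2
 */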
1667 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1668                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
1669                                 struct mlx5e_tc_flow *flow)
1670 {
1671         struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1672         const struct tc_action *a;
1673         LIST_HEAD(actions);
1674         int err;
1675
1676         if (!tcf_exts_has_actions(exts))
1677                 return -EINVAL;
1678
1679         attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
1680         attr->action = 0;
1681
1682         tcf_exts_to_list(exts, &actions);
1683         list_for_each_entry(a, &actions, list) {
1684                 if (is_tcf_gact_shot(a)) {
1685                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1686                         if (MLX5_CAP_FLOWTABLE(priv->mdev,
1687                                                flow_table_properties_nic_receive.flow_counter))
1688                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1689                         continue;
1690                 }
1691
1692                 if (is_tcf_pedit(a)) {
1693                         err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
1694                                                     parse_attr);
1695                         if (err)
1696                                 return err;
1697
1698                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1699                                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1700                         continue;
1701                 }
1702
1703                 if (is_tcf_csum(a)) {
1704                         if (csum_offload_supported(priv, attr->action,
1705                                                    tcf_csum_update_flags(a)))
1706                                 continue;
1707
1708                         return -EOPNOTSUPP;
1709                 }
1710
1711                 if (is_tcf_mirred_egress_redirect(a)) {
1712                         struct net_device *peer_dev = tcf_mirred_dev(a);
1713
1714                         if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
1715                             same_hw_devs(priv, netdev_priv(peer_dev))) {
1716                                 parse_attr->mirred_ifindex = peer_dev->ifindex;
1717                                 flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
1718                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1719                                                 MLX5_FLOW_CONTEXT_ACTION_COUNT;
1720                         } else {
1721                                 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
1722                                             peer_dev->name);
1723                                 return -EINVAL;
1724                         }
1725                         continue;
1726                 }
1727
1728                 if (is_tcf_skbedit_mark(a)) {
1729                         u32 mark = tcf_skbedit_mark(a);
1730
1731                         if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
1732                                 netdev_warn(priv->netdev, "Bad flow mark - only 16 bits are supported: 0x%x\n",
1733                                             mark);
1734                                 return -EINVAL;
1735                         }
1736
1737                         attr->flow_tag = mark;
1738                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1739                         continue;
1740                 }
1741
1742                 return -EINVAL;
1743         }
1744
1745         if (!actions_match_supported(priv, exts, parse_attr, flow))
1746                 return -EOPNOTSUPP;
1747
1748         return 0;
1749 }
1750
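/* Encap entries are kept in esw->offloads.encap_tbl, hashed by the tunnel
 * key; cmp_encap_info() returns zero when two tunnel keys are identical.
 */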
1751 static inline int cmp_encap_info(struct ip_tunnel_key *a,
1752                                  struct ip_tunnel_key *b)
1753 {
1754         return memcmp(a, b, sizeof(*a));
1755 }
1756
1757 static inline int hash_encap_info(struct ip_tunnel_key *key)
1758 {
1759         return jhash(key, sizeof(*key), 0);
1760 }
1761
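/* Resolve the IPv4 route and neighbour for the tunnel destination. If the
 * egress device of the route is not on the same eswitch as priv->netdev,
 * the uplink representor netdev is used as the output device instead.
 */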
1762 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
1763                                    struct net_device *mirred_dev,
1764                                    struct net_device **out_dev,
1765                                    struct flowi4 *fl4,
1766                                    struct neighbour **out_n,
1767                                    int *out_ttl)
1768 {
1769         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1770         struct mlx5e_rep_priv *uplink_rpriv;
1771         struct rtable *rt;
1772         struct neighbour *n = NULL;
1773
1774 #if IS_ENABLED(CONFIG_INET)
1775         int ret;
1776
1777         rt = ip_route_output_key(dev_net(mirred_dev), fl4);
1778         ret = PTR_ERR_OR_ZERO(rt);
1779         if (ret)
1780                 return ret;
1781 #else
1782         return -EOPNOTSUPP;
1783 #endif
1784         uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1785         /* if the egress device isn't on the same HW e-switch, we use the uplink */
1786         if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
1787                 *out_dev = uplink_rpriv->netdev;
1788         else
1789                 *out_dev = rt->dst.dev;
1790
1791         *out_ttl = ip4_dst_hoplimit(&rt->dst);
1792         n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
1793         ip_rt_put(rt);
1794         if (!n)
1795                 return -ENOMEM;
1796
1797         *out_n = n;
1798         return 0;
1799 }
1800
1801 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
1802                                    struct net_device *mirred_dev,
1803                                    struct net_device **out_dev,
1804                                    struct flowi6 *fl6,
1805                                    struct neighbour **out_n,
1806                                    int *out_ttl)
1807 {
1808         struct neighbour *n = NULL;
1809         struct dst_entry *dst;
1810
1811 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
1812         struct mlx5e_rep_priv *uplink_rpriv;
1813         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1814         int ret;
1815
1816         ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
1817                                          fl6);
1818         if (ret < 0)
1819                 return ret;
1820
1821         *out_ttl = ip6_dst_hoplimit(dst);
1822
1823         uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1824         /* if the egress device isn't on the same HW e-switch, we use the uplink */
1825         if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
1826                 *out_dev = uplink_rpriv->netdev;
1827         else
1828                 *out_dev = dst->dev;
1829 #else
1830         return -EOPNOTSUPP;
1831 #endif
1832
1833         n = dst_neigh_lookup(dst, &fl6->daddr);
1834         dst_release(dst);
1835         if (!n)
1836                 return -ENOMEM;
1837
1838         *out_n = n;
1839         return 0;
1840 }
1841
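/* Build the Ethernet/IPv4/UDP/VXLAN encapsulation header in @buf. Length and
 * checksum fields, as well as the UDP source port, are left zero here;
 * presumably the HW completes them when the packet is encapsulated.
 */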
1842 static void gen_vxlan_header_ipv4(struct net_device *out_dev,
1843                                   char buf[], int encap_size,
1844                                   unsigned char h_dest[ETH_ALEN],
1845                                   int ttl,
1846                                   __be32 daddr,
1847                                   __be32 saddr,
1848                                   __be16 udp_dst_port,
1849                                   __be32 vx_vni)
1850 {
1851         struct ethhdr *eth = (struct ethhdr *)buf;
1852         struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
1853         struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
1854         struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1855
1856         memset(buf, 0, encap_size);
1857
1858         ether_addr_copy(eth->h_dest, h_dest);
1859         ether_addr_copy(eth->h_source, out_dev->dev_addr);
1860         eth->h_proto = htons(ETH_P_IP);
1861
1862         ip->daddr = daddr;
1863         ip->saddr = saddr;
1864
1865         ip->ttl = ttl;
1866         ip->protocol = IPPROTO_UDP;
1867         ip->version = 0x4;
1868         ip->ihl = 0x5;
1869
1870         udp->dest = udp_dst_port;
1871         vxh->vx_flags = VXLAN_HF_VNI;
1872         vxh->vx_vni = vxlan_vni_field(vx_vni);
1873 }
1874
1875 static void gen_vxlan_header_ipv6(struct net_device *out_dev,
1876                                   char buf[], int encap_size,
1877                                   unsigned char h_dest[ETH_ALEN],
1878                                   int ttl,
1879                                   struct in6_addr *daddr,
1880                                   struct in6_addr *saddr,
1881                                   __be16 udp_dst_port,
1882                                   __be32 vx_vni)
1883 {
1884         struct ethhdr *eth = (struct ethhdr *)buf;
1885         struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
1886         struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
1887         struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1888
1889         memset(buf, 0, encap_size);
1890
1891         ether_addr_copy(eth->h_dest, h_dest);
1892         ether_addr_copy(eth->h_source, out_dev->dev_addr);
1893         eth->h_proto = htons(ETH_P_IPV6);
1894
1895         ip6_flow_hdr(ip6h, 0, 0);
1896         /* the HW fills in the IPv6 payload length */
1897         ip6h->nexthdr     = IPPROTO_UDP;
1898         ip6h->hop_limit   = ttl;
1899         ip6h->daddr       = *daddr;
1900         ip6h->saddr       = *saddr;
1901
1902         udp->dest = udp_dst_port;
1903         vxh->vx_flags = VXLAN_HF_VNI;
1904         vxh->vx_vni = vxlan_vni_field(vx_vni);
1905 }
1906
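/* Create an IPv4 VXLAN encap header for @e: resolve the route and neighbour,
 * attach the encap entry to the representor's neigh update machinery, build
 * the header and allocate an encap object in the device. If the neighbour is
 * not valid yet, a neigh event is kicked and -EAGAIN is returned so the rule
 * can be offloaded later, once the neighbour resolves.
 */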
1907 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1908                                           struct net_device *mirred_dev,
1909                                           struct mlx5e_encap_entry *e)
1910 {
1911         int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
1912         int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
1913         struct ip_tunnel_key *tun_key = &e->tun_info.key;
1914         struct net_device *out_dev;
1915         struct neighbour *n = NULL;
1916         struct flowi4 fl4 = {};
1917         char *encap_header;
1918         int ttl, err;
1919         u8 nud_state;
1920
1921         if (max_encap_size < ipv4_encap_size) {
1922                 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
1923                                ipv4_encap_size, max_encap_size);
1924                 return -EOPNOTSUPP;
1925         }
1926
1927         encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
1928         if (!encap_header)
1929                 return -ENOMEM;
1930
1931         switch (e->tunnel_type) {
1932         case MLX5_HEADER_TYPE_VXLAN:
1933                 fl4.flowi4_proto = IPPROTO_UDP;
1934                 fl4.fl4_dport = tun_key->tp_dst;
1935                 break;
1936         default:
1937                 err = -EOPNOTSUPP;
1938                 goto free_encap;
1939         }
1940         fl4.flowi4_tos = tun_key->tos;
1941         fl4.daddr = tun_key->u.ipv4.dst;
1942         fl4.saddr = tun_key->u.ipv4.src;
1943
1944         err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
1945                                       &fl4, &n, &ttl);
1946         if (err)
1947                 goto free_encap;
1948
1949         /* used by mlx5e_detach_encap to look up the entry in the neigh
1950          * hash table when a user deletes a rule
1951          */
1952         e->m_neigh.dev = n->dev;
1953         e->m_neigh.family = n->ops->family;
1954         memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
1955         e->out_dev = out_dev;
1956
1957         /* It's important to add the neigh to the hash table before checking
1958          * its validity state, so that if we get a notification because the
1959          * neigh changed its validity state, we will find the relevant neigh
1960          * in the hash.
1961          */
1962         err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1963         if (err)
1964                 goto free_encap;
1965
1966         read_lock_bh(&n->lock);
1967         nud_state = n->nud_state;
1968         ether_addr_copy(e->h_dest, n->ha);
1969         read_unlock_bh(&n->lock);
1970
1971         switch (e->tunnel_type) {
1972         case MLX5_HEADER_TYPE_VXLAN:
1973                 gen_vxlan_header_ipv4(out_dev, encap_header,
1974                                       ipv4_encap_size, e->h_dest, ttl,
1975                                       fl4.daddr,
1976                                       fl4.saddr, tun_key->tp_dst,
1977                                       tunnel_id_to_key32(tun_key->tun_id));
1978                 break;
1979         default:
1980                 err = -EOPNOTSUPP;
1981                 goto destroy_neigh_entry;
1982         }
1983         e->encap_size = ipv4_encap_size;
1984         e->encap_header = encap_header;
1985
1986         if (!(nud_state & NUD_VALID)) {
1987                 neigh_event_send(n, NULL);
1988                 err = -EAGAIN;
1989                 goto out;
1990         }
1991
1992         err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
1993                                ipv4_encap_size, encap_header, &e->encap_id);
1994         if (err)
1995                 goto destroy_neigh_entry;
1996
1997         e->flags |= MLX5_ENCAP_ENTRY_VALID;
1998         mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
1999         neigh_release(n);
2000         return err;
2001
2002 destroy_neigh_entry:
2003         mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
2004 free_encap:
2005         kfree(encap_header);
2006 out:
2007         if (n)
2008                 neigh_release(n);
2009         return err;
2010 }
2011
2012 static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
2013                                           struct net_device *mirred_dev,
2014                                           struct mlx5e_encap_entry *e)
2015 {
2016         int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
2017         int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
2018         struct ip_tunnel_key *tun_key = &e->tun_info.key;
2019         struct net_device *out_dev;
2020         struct neighbour *n = NULL;
2021         struct flowi6 fl6 = {};
2022         char *encap_header;
2023         int err, ttl = 0;
2024         u8 nud_state;
2025
2026         if (max_encap_size < ipv6_encap_size) {
2027                 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
2028                                ipv6_encap_size, max_encap_size);
2029                 return -EOPNOTSUPP;
2030         }
2031
2032         encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
2033         if (!encap_header)
2034                 return -ENOMEM;
2035
2036         switch (e->tunnel_type) {
2037         case MLX5_HEADER_TYPE_VXLAN:
2038                 fl6.flowi6_proto = IPPROTO_UDP;
2039                 fl6.fl6_dport = tun_key->tp_dst;
2040                 break;
2041         default:
2042                 err = -EOPNOTSUPP;
2043                 goto free_encap;
2044         }
2045
2046         fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
2047         fl6.daddr = tun_key->u.ipv6.dst;
2048         fl6.saddr = tun_key->u.ipv6.src;
2049
2050         err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
2051                                       &fl6, &n, &ttl);
2052         if (err)
2053                 goto free_encap;
2054
2055         /* used by mlx5e_detach_encap to look up the entry in the neigh
2056          * hash table when a user deletes a rule
2057          */
2058         e->m_neigh.dev = n->dev;
2059         e->m_neigh.family = n->ops->family;
2060         memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2061         e->out_dev = out_dev;
2062
2063         /* It's important to add the neigh to the hash table before checking
2064          * its validity state, so that if we get a notification because the
2065          * neigh changed its validity state, we will find the relevant neigh
2066          * in the hash.
2067          */
2068         err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2069         if (err)
2070                 goto free_encap;
2071
2072         read_lock_bh(&n->lock);
2073         nud_state = n->nud_state;
2074         ether_addr_copy(e->h_dest, n->ha);
2075         read_unlock_bh(&n->lock);
2076
2077         switch (e->tunnel_type) {
2078         case MLX5_HEADER_TYPE_VXLAN:
2079                 gen_vxlan_header_ipv6(out_dev, encap_header,
2080                                       ipv6_encap_size, e->h_dest, ttl,
2081                                       &fl6.daddr,
2082                                       &fl6.saddr, tun_key->tp_dst,
2083                                       tunnel_id_to_key32(tun_key->tun_id));
2084                 break;
2085         default:
2086                 err = -EOPNOTSUPP;
2087                 goto destroy_neigh_entry;
2088         }
2089
2090         e->encap_size = ipv6_encap_size;
2091         e->encap_header = encap_header;
2092
2093         if (!(nud_state & NUD_VALID)) {
2094                 neigh_event_send(n, NULL);
2095                 err = -EAGAIN;
2096                 goto out;
2097         }
2098
2099         err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
2100                                ipv6_encap_size, encap_header, &e->encap_id);
2101         if (err)
2102                 goto destroy_neigh_entry;
2103
2104         e->flags |= MLX5_ENCAP_ENTRY_VALID;
2105         mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
2106         neigh_release(n);
2107         return err;
2108
2109 destroy_neigh_entry:
2110         mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
2111 free_encap:
2112         kfree(encap_header);
2113 out:
2114         if (n)
2115                 neigh_release(n);
2116         return err;
2117 }
2118
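/* Find or create the encap entry matching the flow's tunnel key and attach
 * the flow to it. Only VXLAN over an offloaded UDP dport is supported; the
 * UDP dst port must be given and the src port must be left unset. Returns
 * -EAGAIN when the encap entry is not valid yet (neighbour not resolved).
 */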
2119 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2120                               struct ip_tunnel_info *tun_info,
2121                               struct net_device *mirred_dev,
2122                               struct net_device **encap_dev,
2123                               struct mlx5e_tc_flow *flow)
2124 {
2125         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2126         struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
2127                                                                            REP_ETH);
2128         struct net_device *up_dev = uplink_rpriv->netdev;
2129         unsigned short family = ip_tunnel_info_af(tun_info);
2130         struct mlx5e_priv *up_priv = netdev_priv(up_dev);
2131         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2132         struct ip_tunnel_key *key = &tun_info->key;
2133         struct mlx5e_encap_entry *e;
2134         int tunnel_type, err = 0;
2135         uintptr_t hash_key;
2136         bool found = false;
2137
2138         /* udp dst port must be set */
2139         if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
2140                 goto vxlan_encap_offload_err;
2141
2142         /* setting udp src port isn't supported */
2143         if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
2144 vxlan_encap_offload_err:
2145                 netdev_warn(priv->netdev,
2146                             "must set udp dst port and not set udp src port\n");
2147                 return -EOPNOTSUPP;
2148         }
2149
2150         if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
2151             MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
2152                 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
2153         } else {
2154                 netdev_warn(priv->netdev,
2155                             "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
2156                 return -EOPNOTSUPP;
2157         }
2158
2159         hash_key = hash_encap_info(key);
2160
2161         hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2162                                    encap_hlist, hash_key) {
2163                 if (!cmp_encap_info(&e->tun_info.key, key)) {
2164                         found = true;
2165                         break;
2166                 }
2167         }
2168
2169         /* the encap entry might not be valid yet; this is checked at attach_flow */
2170         if (found)
2171                 goto attach_flow;
2172
2173         e = kzalloc(sizeof(*e), GFP_KERNEL);
2174         if (!e)
2175                 return -ENOMEM;
2176
2177         e->tun_info = *tun_info;
2178         e->tunnel_type = tunnel_type;
2179         INIT_LIST_HEAD(&e->flows);
2180
2181         if (family == AF_INET)
2182                 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
2183         else if (family == AF_INET6)
2184                 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
2185
2186         if (err && err != -EAGAIN)
2187                 goto out_err;
2188
2189         hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
2190
2191 attach_flow:
2192         list_add(&flow->encap, &e->flows);
2193         *encap_dev = e->out_dev;
2194         if (e->flags & MLX5_ENCAP_ENTRY_VALID)
2195                 attr->encap_id = e->encap_id;
2196         else
2197                 err = -EAGAIN;
2198
2199         return err;
2200
2201 out_err:
2202         kfree(e);
2203         return err;
2204 }
2205
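/* Parse the TC actions of an eswitch (FDB) flow: drop, pedit, csum, vlan
 * push/pop, tunnel decap, and mirred redirect either to a representor on the
 * same eswitch or, when preceded by tunnel_key set, to a tunnel device
 * (VXLAN encap).
 *
 * Illustrative only (assumed iproute2 syntax; $REP, the addresses and
 * vxlan0 are placeholders), an encap rule could look roughly like:
 *   tc filter add dev $REP ingress protocol ip flower \
 *      action tunnel_key set src_ip 10.0.0.1 dst_ip 10.0.0.2 id 100 dst_port 4789 \
 *      action mirred egress redirect dev vxlan0
 */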
2206 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2207                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
2208                                 struct mlx5e_tc_flow *flow)
2209 {
2210         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2211         struct mlx5e_rep_priv *rpriv = priv->ppriv;
2212         struct ip_tunnel_info *info = NULL;
2213         const struct tc_action *a;
2214         LIST_HEAD(actions);
2215         bool encap = false;
2216         int err = 0;
2217
2218         if (!tcf_exts_has_actions(exts))
2219                 return -EINVAL;
2220
2221         memset(attr, 0, sizeof(*attr));
2222         attr->in_rep = rpriv->rep;
2223
2224         tcf_exts_to_list(exts, &actions);
2225         list_for_each_entry(a, &actions, list) {
2226                 if (is_tcf_gact_shot(a)) {
2227                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2228                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
2229                         continue;
2230                 }
2231
2232                 if (is_tcf_pedit(a)) {
2233                         err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
2234                                                     parse_attr);
2235                         if (err)
2236                                 return err;
2237
2238                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2239                         continue;
2240                 }
2241
2242                 if (is_tcf_csum(a)) {
2243                         if (csum_offload_supported(priv, attr->action,
2244                                                    tcf_csum_update_flags(a)))
2245                                 continue;
2246
2247                         return -EOPNOTSUPP;
2248                 }
2249
2250                 if (is_tcf_mirred_egress_redirect(a)) {
2251                         struct net_device *out_dev;
2252                         struct mlx5e_priv *out_priv;
2253
2254                         out_dev = tcf_mirred_dev(a);
2255
2256                         if (switchdev_port_same_parent_id(priv->netdev,
2257                                                           out_dev)) {
2258                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2259                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
2260                                 out_priv = netdev_priv(out_dev);
2261                                 rpriv = out_priv->ppriv;
2262                                 attr->out_rep = rpriv->rep;
2263                         } else if (encap) {
2264                                 parse_attr->mirred_ifindex = out_dev->ifindex;
2265                                 parse_attr->tun_info = *info;
2266                                 attr->parse_attr = parse_attr;
2267                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
2268                                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2269                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
2270                                 /* attr->out_rep is resolved when we handle encap */
2271                         } else {
2272                                 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
2273                                        priv->netdev->name, out_dev->name);
2274                                 return -EINVAL;
2275                         }
2276                         continue;
2277                 }
2278
2279                 if (is_tcf_tunnel_set(a)) {
2280                         info = tcf_tunnel_info(a);
2281                         if (info)
2282                                 encap = true;
2283                         else
2284                                 return -EOPNOTSUPP;
2285                         continue;
2286                 }
2287
2288                 if (is_tcf_vlan(a)) {
2289                         if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
2290                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2291                         } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
2292                                 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
2293                                         return -EOPNOTSUPP;
2294
2295                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
2296                                 attr->vlan = tcf_vlan_push_vid(a);
2297                         } else { /* action is TCA_VLAN_ACT_MODIFY */
2298                                 return -EOPNOTSUPP;
2299                         }
2300                         continue;
2301                 }
2302
2303                 if (is_tcf_tunnel_release(a)) {
2304                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2305                         continue;
2306                 }
2307
2308                 return -EINVAL;
2309         }
2310
2311         if (!actions_match_supported(priv, exts, parse_attr, flow))
2312                 return -EOPNOTSUPP;
2313
2314         return err;
2315 }
2316
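/* Entry point for adding a flower classifier offload. The flow is parsed into
 * either an eswitch (FDB) or a NIC attribute, the HW rule is added (or left
 * pending when encap returns -EAGAIN), and the flow is inserted into the
 * driver's flow hash table keyed by the TC cookie.
 */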
2317 int mlx5e_configure_flower(struct mlx5e_priv *priv,
2318                            struct tc_cls_flower_offload *f)
2319 {
2320         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2321         struct mlx5e_tc_flow_parse_attr *parse_attr;
2322         struct mlx5e_tc_table *tc = &priv->fs.tc;
2323         struct mlx5e_tc_flow *flow;
2324         int attr_size, err = 0;
2325         u8 flow_flags = 0;
2326
2327         if (esw && esw->mode == SRIOV_OFFLOADS) {
2328                 flow_flags = MLX5E_TC_FLOW_ESWITCH;
2329                 attr_size  = sizeof(struct mlx5_esw_flow_attr);
2330         } else {
2331                 flow_flags = MLX5E_TC_FLOW_NIC;
2332                 attr_size  = sizeof(struct mlx5_nic_flow_attr);
2333         }
2334
2335         flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
2336         parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
2337         if (!parse_attr || !flow) {
2338                 err = -ENOMEM;
2339                 goto err_free;
2340         }
2341
2342         flow->cookie = f->cookie;
2343         flow->flags = flow_flags;
2344
2345         err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
2346         if (err < 0)
2347                 goto err_free;
2348
2349         if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
2350                 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
2351                 if (err < 0)
2352                         goto err_free;
2353                 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
2354         } else {
2355                 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
2356                 if (err < 0)
2357                         goto err_free;
2358                 flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
2359         }
2360
2361         if (IS_ERR(flow->rule)) {
2362                 err = PTR_ERR(flow->rule);
2363                 if (err != -EAGAIN)
2364                         goto err_free;
2365         }
2366
2367         if (err != -EAGAIN)
2368                 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2369
2370         err = rhashtable_insert_fast(&tc->ht, &flow->node,
2371                                      tc->ht_params);
2372         if (err)
2373                 goto err_del_rule;
2374
2375         if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
2376             !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
2377                 kvfree(parse_attr);
2378         return err;
2379
2380 err_del_rule:
2381         mlx5e_tc_del_flow(priv, flow);
2382
2383 err_free:
2384         kvfree(parse_attr);
2385         kfree(flow);
2386         return err;
2387 }
2388
2389 int mlx5e_delete_flower(struct mlx5e_priv *priv,
2390                         struct tc_cls_flower_offload *f)
2391 {
2392         struct mlx5e_tc_flow *flow;
2393         struct mlx5e_tc_table *tc = &priv->fs.tc;
2394
2395         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
2396                                       tc->ht_params);
2397         if (!flow)
2398                 return -EINVAL;
2399
2400         rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
2401
2402         mlx5e_tc_del_flow(priv, flow);
2403
2404         kfree(flow);
2405
2406         return 0;
2407 }
2408
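/* Report HW counters for an offloaded flow back to TC using the cached
 * counter values (bytes, packets, last-use timestamp).
 */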
2409 int mlx5e_stats_flower(struct mlx5e_priv *priv,
2410                        struct tc_cls_flower_offload *f)
2411 {
2412         struct mlx5e_tc_table *tc = &priv->fs.tc;
2413         struct mlx5e_tc_flow *flow;
2414         struct mlx5_fc *counter;
2415         u64 bytes;
2416         u64 packets;
2417         u64 lastuse;
2418
2419         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
2420                                       tc->ht_params);
2421         if (!flow)
2422                 return -EINVAL;
2423
2424         if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
2425                 return 0;
2426
2427         counter = mlx5_flow_rule_counter(flow->rule);
2428         if (!counter)
2429                 return 0;
2430
2431         mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
2432
2433         tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
2434
2435         return 0;
2436 }
2437
2438 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
2439         .head_offset = offsetof(struct mlx5e_tc_flow, node),
2440         .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
2441         .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
2442         .automatic_shrinking = true,
2443 };
2444
2445 int mlx5e_tc_init(struct mlx5e_priv *priv)
2446 {
2447         struct mlx5e_tc_table *tc = &priv->fs.tc;
2448
2449         hash_init(tc->mod_hdr_tbl);
2450         hash_init(tc->hairpin_tbl);
2451
2452         tc->ht_params = mlx5e_tc_flow_ht_params;
2453         return rhashtable_init(&tc->ht, &tc->ht_params);
2454 }
2455
2456 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
2457 {
2458         struct mlx5e_tc_flow *flow = ptr;
2459         struct mlx5e_priv *priv = arg;
2460
2461         mlx5e_tc_del_flow(priv, flow);
2462         kfree(flow);
2463 }
2464
2465 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
2466 {
2467         struct mlx5e_tc_table *tc = &priv->fs.tc;
2468
2469         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
2470
2471         if (!IS_ERR_OR_NULL(tc->t)) {
2472                 mlx5_destroy_flow_table(tc->t);
2473                 tc->t = NULL;
2474         }
2475 }