/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap; /* flows sharing the same encap */
	struct mlx5_esw_flow_attr *attr;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

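/* Install a NIC-side TC rule: resolve the destination (the vlan flow
 * table for forwarding, or a newly allocated flow counter for drop
 * accounting), lazily create the TC offload flow table on first use,
 * and add a rule matching on the outer headers.
 */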
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	if (!IS_ERR(flow->rule)) {
		counter = mlx5_flow_rule_counter(flow->rule);
		mlx5_del_flow_rules(flow->rule);
		mlx5_fc_destroy(priv->mdev, counter);
	}

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

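/* Install an e-switch (FDB) rule: apply any vlan push/pop action first,
 * then offload the rule to the FDB offloads table.
 */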
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);

	mlx5_eswitch_del_vlan_action(esw, flow->attr);

	if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}

/* We also get here when setting the rule in the FW failed, etc. In that
 * case the flow rule itself might not exist, but some offloading related
 * to the actions should still be cleaned up.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

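/* Fill the VXLAN-specific parts of the match spec: match UDP as the IP
 * protocol and, when the filter matches on the tunnel key ID, match the
 * VNI in the misc parameters.
 */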
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

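/* Parse the tunnel (outer) match of a decap flow: require a fully masked
 * UDP dst port that belongs to an offloaded VXLAN socket, translate the
 * encapsulation IPv4/IPv6 addresses, enforce a DMAC match (needed for
 * flow counters) and leave IP fragments to software.
 */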
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

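/* Translate a flower match into an mlx5 flow spec: reject dissector keys
 * the hardware cannot match on, handle tunnel matches first (after which
 * the header pointers move to the inner headers), then fill the
 * L2/L3/L4 fields, tracking the minimal e-switch inline mode the match
 * requires.
 */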
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers; outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_eswitch_rep *rep = priv->ppriv;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
	    rep->vport != FDB_UPLINK_VPORT) {
		if (min_inline > esw->offloads.inline_mode) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

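/* Parse the TC actions of a NIC flow into an mlx5 action mask and flow
 * tag. A single action per rule is supported: gact drop (with a counter
 * when the device provides one), or skbedit mark, whose 16-bit mark is
 * delivered to the CQE as the flow tag.
 */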
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

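/* Encap entries are kept in a hash table keyed by the tunnel key; two
 * flows share an entry when their full ip_tunnel_key matches.
 */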
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

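/* Resolve the route, egress device, TTL and neighbour for an encap
 * destination. When the egress device is not on the same HW e-switch as
 * this netdev, the uplink netdev is used instead.
 */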
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

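/* Build the raw Ethernet/IP/UDP/VXLAN encapsulation header into buf and
 * return its size. The buffer is zeroed first; fields not set here (e.g.
 * the IPv6 payload length, per the comment in the IPv6 variant) are left
 * for the HW to fill in.
 */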
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 __be32 daddr,
				 __be32 saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

static int gen_vxlan_header_ipv6(struct net_device *out_dev,
				 char buf[],
				 unsigned char h_dest[ETH_ALEN],
				 int ttl,
				 struct in6_addr *daddr,
				 struct in6_addr *saddr,
				 __be16 udp_dst_port,
				 __be32 vx_vni)
{
	int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills in the ipv6 payload len */
	ip6h->nexthdr     = IPPROTO_UDP;
	ip6h->hop_limit   = ttl;
	ip6h->daddr       = *daddr;
	ip6h->saddr       = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);

	return encap_size;
}

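/* Resolve the route and neighbour for the tunnel destination, build the
 * VXLAN encap header and register it with the FW to obtain the encap_id
 * used by offloaded rules. Fails when the neighbour entry is not valid,
 * since the destination MAC would be unknown.
 */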
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, ttl, err;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
						   e->h_dest, ttl,
						   fl4.daddr,
						   fl4.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	int encap_size, err, ttl = 0;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;

	encap_header = kzalloc(max_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
						   e->h_dest, ttl,
						   &fl6.daddr,
						   &fl6.saddr, tun_key->tp_dst,
						   tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       encap_size, encap_header, &e->encap_id);
out:
	if (err && n)
		neigh_release(n);
	kfree(encap_header);
	return err;
}

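/* Find or create the encap entry for a tunnel_set action: validate the
 * UDP ports, look up an existing entry by tunnel key in the e-switch
 * encap table, otherwise build a new encap header for the address family
 * and hash the entry in.
 */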
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = -EOPNOTSUPP;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found) {
		attr->encap = e;
		return 0;
	}

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	attr->encap = e;
	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

	return err;

out_err:
	kfree(e);
	return err;
}

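/* Parse the TC actions of an e-switch flow into the esw flow attributes:
 * drop, mirred redirect to a port on the same HW switch (or through an
 * encap entry when preceded by a tunnel_set action), vlan push/pop, and
 * tunnel release (decap).
 */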
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->attr;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				attr->out_rep = out_priv->ppriv;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, attr);
				if (err)
					return err;
				list_add(&flow->encap, &attr->encap->flows);
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(attr->encap->out_dev);
				attr->out_rep = out_priv->ppriv;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

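/* Add a flower classifier: parse the match and actions, install the rule
 * in the FDB (when the e-switch is in offloads mode) or in the NIC TC
 * table, and track the flow in a hash table keyed by the TC cookie.
 */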
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err, attr_size = 0;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size  = sizeof(struct mlx5_esw_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		flow->attr  = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_del_rule;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(spec);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

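/* Report hardware stats for a flower classifier: read the cached flow
 * counter and propagate bytes/packets/lastuse to the TC actions.
 */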
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}