/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
        u32 action;
        u32 flow_tag;
        u32 mod_hdr_id;
};

enum {
        MLX5E_TC_FLOW_ESWITCH   = BIT(0),
        MLX5E_TC_FLOW_NIC       = BIT(1),
        MLX5E_TC_FLOW_OFFLOADED = BIT(2),
};

struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        u8                      flags;
        struct mlx5_flow_handle *rule;
        struct list_head        encap; /* flows sharing the same encap */
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
                struct mlx5_nic_flow_attr nic_attr[0];
        };
};

struct mlx5e_tc_flow_parse_attr {
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
        void *mod_hdr_actions;
};

enum {
        MLX5_HEADER_TYPE_VXLAN = 0x0,
        MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

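/* Install a TC flow into the NIC RX steering tables (non-eswitch mode):
 * resolve the rule destination (forward to the vlan flow table, or to a
 * dedicated flow counter for drop rules), allocate a modify-header
 * context when a header rewrite was parsed, lazily create the TC offload
 * flow table on first use, and finally add the rule. On failure, the
 * steps already taken are unwound in reverse order.
 */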
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = {};
        struct mlx5_flow_act flow_act = {
                .action = attr->action,
                .flow_tag = attr->flow_tag,
                .encap_id = 0,
        };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        bool table_created = false;
        int err;

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
                                               parse_attr->num_mod_hdr_actions,
                                               parse_attr->mod_hdr_actions,
                                               &attr->mod_hdr_id);
                flow_act.modify_id = attr->mod_hdr_id;
                kfree(parse_attr->mod_hdr_actions);
                if (err) {
                        rule = ERR_PTR(err);
                        goto err_create_mod_hdr_id;
                }
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
                                   &flow_act, &dest, 1);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5_modify_header_dealloc(priv->mdev,
                                           attr->mod_hdr_id);
err_create_mod_hdr_id:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

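/* Tear down a NIC flow: delete the rule and its counter, destroy the TC
 * offload table once the last filter is gone, and release the
 * modify-header context if one was allocated.
 */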
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_fc *counter = NULL;

        counter = mlx5_flow_rule_counter(flow->rule);
        mlx5_del_flow_rules(flow->rule);
        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }

        if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5_modify_header_dealloc(priv->mdev,
                                           flow->nic_attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow);

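/* Install a TC flow into the eswitch FDB offload tables: apply any vlan
 * push/pop action, allocate a modify-header context in the FDB namespace
 * when needed, then add the offloaded rule. Failures unwind in reverse
 * order, including detaching a pending encap entry.
 */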
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5_flow_handle *rule;
        int err;

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err) {
                rule = ERR_PTR(err);
                goto err_add_vlan;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
                                               parse_attr->num_mod_hdr_actions,
                                               parse_attr->mod_hdr_actions,
                                               &attr->mod_hdr_id);
                kfree(parse_attr->mod_hdr_actions);
                if (err) {
                        rule = ERR_PTR(err);
                        goto err_mod_hdr;
                }
        }

        rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5_modify_header_dealloc(priv->mdev,
                                           attr->mod_hdr_id);
err_mod_hdr:
        mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                mlx5e_detach_encap(priv, flow);
        return rule;
}

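/* Tear down an FDB flow: remove the offloaded rule (if it is currently
 * installed in hardware), undo the vlan action, detach the encap entry
 * and free the cached parse attributes for encap flows, and release the
 * modify-header context.
 */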
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;

        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
                mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
        }

        mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

        if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
                mlx5e_detach_encap(priv, flow);
                kvfree(flow->esw_attr->parse_attr);
        }

        if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5_modify_header_dealloc(priv->mdev,
                                           attr->mod_hdr_id);
}

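/* Called when a tunnel neighbour becomes valid: offload the cached
 * encapsulation header and re-install every flow that shares this encap
 * entry into hardware.
 */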
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5e_tc_flow *flow;
        int err;

        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               e->encap_size, e->encap_header,
                               &e->encap_id);
        if (err) {
                mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
                               err);
                return;
        }
        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(flow, &e->flows, encap) {
                flow->esw_attr->encap_id = e->encap_id;
                flow->rule = mlx5e_tc_add_fdb_flow(priv,
                                                   flow->esw_attr->parse_attr,
                                                   flow);
                if (IS_ERR(flow->rule)) {
                        err = PTR_ERR(flow->rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                       err);
                        continue;
                }
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
        }
}

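/* Called when a tunnel neighbour becomes invalid: pull all flows sharing
 * this encap entry out of hardware and release the offloaded
 * encapsulation header.
 */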
void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5_fc *counter;

        list_for_each_entry(flow, &e->flows, encap) {
                if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
                        counter = mlx5_flow_rule_counter(flow->rule);
                        mlx5_del_flow_rules(flow->rule);
                        mlx5_fc_destroy(priv->mdev, counter);
                }
        }

        if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
                e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
                mlx5_encap_dealloc(priv->mdev, e->encap_id);
        }
}

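/* Poll the cached counters of all offloaded flows going through this
 * neighbour; if any flow saw traffic since the last report, mark the
 * neighbour as used so the kernel keeps it alive.
 */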
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
        u64 bytes, packets, lastuse = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5e_encap_entry *e;
        struct mlx5_fc *counter;
        struct neigh_table *tbl;
        bool neigh_used = false;
        struct neighbour *n;

        if (m_neigh->family == AF_INET)
                tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
        else if (m_neigh->family == AF_INET6)
                tbl = ipv6_stub->nd_tbl;
#endif
        else
                return;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
                        continue;
                list_for_each_entry(flow, &e->flows, encap) {
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                                counter = mlx5_flow_rule_counter(flow->rule);
                                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
                                        break;
                                }
                        }
                }
        }

        if (neigh_used) {
                nhe->reported_lastuse = jiffies;

                /* find the relevant neigh according to the cached device and
                 * dst ip pair
                 */
                n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
                if (!n) {
                        WARN(1, "The neighbour was already freed\n");
                        return;
                }

                neigh_event_send(n, NULL);
                neigh_release(n);
        }
}

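/* Unlink a flow from its encap entry; when the last flow is gone, detach
 * the entry from the neighbour update machinery, release the offloaded
 * encap header and free the entry itself.
 */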
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->encap.next;

        list_del(&flow->encap);
        if (list_empty(next)) {
                struct mlx5e_encap_entry *e;

                e = list_entry(next, struct mlx5e_encap_entry, flows);
                mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);

                hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
                kfree(e);
        }
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                mlx5e_tc_del_fdb_flow(priv, flow);
        else
                mlx5e_tc_del_nic_flow(priv, flow);
}

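/* Translate a flower tunnel key id match into a vxlan_vni match on the
 * misc parameters, and pin the outer ip protocol to UDP.
 */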
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
}

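/* Build the outer-header match for a decap flow. Only vxlan is
 * supported: the UDP dst port must be fully masked and belong to an
 * offloaded vxlan socket. Also enforces a DMAC match (flow counters
 * require one) and leaves IP fragments to the software path.
 */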
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);

        struct flow_dissector_key_control *enc_control =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                          f->key);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);
                struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
                struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
                struct mlx5e_priv *up_priv = netdev_priv(up_dev);

                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        goto vxlan_match_offload_err;

                if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                        parse_vxlan_attr(spec, f);
                } else {
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
                }

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_sport, ntohs(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
vxlan_match_offload_err:
                netdev_warn(priv->netdev,
                            "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
                return -EOPNOTSUPP;
        }

        if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->mask);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(key->src));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
        } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
        }

        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
         */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dmac_47_16), priv->netdev->dev_addr);

        /* let software handle IP fragments */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

        return 0;
}

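/* Translate a flower match into an mlx5 flow spec: reject dissector keys
 * the hardware cannot match on, parse tunnel matches into the outer
 * headers, then fill the L2/vlan/L3/L4 criteria. *min_inline is raised
 * to the minimal eswitch inline mode the parsed matches require.
 */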
static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              u8 *min_inline)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        *min_inline = MLX5_INLINE_MODE_L2;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if ((dissector_uses_key(f->dissector,
                                FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
            dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  f->key);
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                /* In decap flows, header pointers should point to the inner
                 * headers; the outer headers were already set by
                 * parse_tunnel_attr
                 */
                headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                         inner_headers);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);

                struct flow_dissector_key_control *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->mask);
                addr_type = key->addr_type;

                if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 key->flags & FLOW_DIS_IS_FRAGMENT);

                        /* the HW doesn't need L3 inline to match on frag=no */
                        if (key->flags & FLOW_DIS_IS_FRAGMENT)
                                *min_inline = MLX5_INLINE_MODE_IP;
                }
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);

                if (mask->ip_proto)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id || mask->vlan_priority) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));

                if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
                    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_TCP_UDP;
        }

        return 0;
}

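/* Parse the flower match and, for eswitch flows, verify that the
 * configured eswitch inline mode is sufficient for the headers the rule
 * needs to match on; otherwise refuse the offload.
 */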
static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5e_tc_flow *flow,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct mlx5_eswitch_rep *rep;
        u8 min_inline;
        int err;

        err = __parse_cls_flower(priv, spec, f, &min_inline);

        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
                rep = rpriv->rep;
                if (rep->vport != FDB_UPLINK_VPORT &&
                    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
                    esw->offloads.inline_mode < min_inline)) {
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    min_inline, esw->offloads.inline_mode);
                        return -EOPNOTSUPP;
                }
        }

        return err;
}

struct pedit_headers {
        struct ethhdr  eth;
        struct iphdr   ip4;
        struct ipv6hdr ip6;
        struct tcphdr  tcp;
        struct udphdr  udp;
};

static int pedit_header_offsets[] = {
        [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
        [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

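/* Record one pedit key into the per-header-type masks/vals accumulators,
 * refusing a second write to bits that were already claimed.
 */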
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
                         struct pedit_headers *masks,
                         struct pedit_headers *vals)
{
        u32 *curr_pmask, *curr_pval;

        if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
                goto out_err;

        curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
        curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

        if (*curr_pmask & mask)  /* disallow acting twice on the same location */
                goto out_err;

        *curr_pmask |= mask;
        *curr_pval  |= (val & mask);

        return 0;

out_err:
        return -EOPNOTSUPP;
}

struct mlx5_fields {
        u8  field;
        u8  size;
        u32 offset;
};

static struct mlx5_fields fields[] = {
        {MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
        {MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
        {MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
        {MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
        {MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

        {MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
        {MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
        {MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
        {MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

        {MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
        {MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
        {MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
        {MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
        {MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
        {MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
        {MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
        {MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

        {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
        {MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
        {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

        {MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
        {MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};

/* On input, parse_attr->num_mod_hdr_actions tells how many HW actions can
 * be parsed at most from the SW pedit action. On success, it is set to
 * the number of HW actions that were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
                                struct pedit_headers *vals,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
        int i, action_size, nactions, max_actions, first, last, first_z;
        void *s_masks_p, *a_masks_p, *vals_p;
        struct mlx5_fields *f;
        u8 cmd, field_bsize;
        u32 s_mask, a_mask;
        unsigned long mask;
        void *action;

        set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
        add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
        set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
        add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
        action = parse_attr->mod_hdr_actions;
        max_actions = parse_attr->num_mod_hdr_actions;
        nactions = 0;

        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                f = &fields[i];
                /* avoid seeing bits set from previous iterations */
                s_mask = 0;
                a_mask = 0;

                s_masks_p = (void *)set_masks + f->offset;
                a_masks_p = (void *)add_masks + f->offset;

                memcpy(&s_mask, s_masks_p, f->size);
                memcpy(&a_mask, a_masks_p, f->size);

                if (!s_mask && !a_mask) /* nothing to offload here */
                        continue;

                if (s_mask && a_mask) {
                        printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
                        return -EOPNOTSUPP;
                }

                if (nactions == max_actions) {
                        printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
                        return -EOPNOTSUPP;
                }

                if (s_mask) {
                        cmd  = MLX5_ACTION_TYPE_SET;
                        mask = s_mask;
                        vals_p = (void *)set_vals + f->offset;
                        /* clear to denote we consumed this field */
                        memset(s_masks_p, 0, f->size);
                } else {
                        cmd  = MLX5_ACTION_TYPE_ADD;
                        mask = a_mask;
                        vals_p = (void *)add_vals + f->offset;
                        /* clear to denote we consumed this field */
                        memset(a_masks_p, 0, f->size);
                }

                field_bsize = f->size * BITS_PER_BYTE;

                first_z = find_first_zero_bit(&mask, field_bsize);
                first = find_first_bit(&mask, field_bsize);
                last  = find_last_bit(&mask, field_bsize);
                if (first > 0 || last != (field_bsize - 1) || first_z < last) {
                        printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
                               mask);
                        return -EOPNOTSUPP;
                }

                MLX5_SET(set_action_in, action, action_type, cmd);
                MLX5_SET(set_action_in, action, field, f->field);

                if (cmd == MLX5_ACTION_TYPE_SET) {
                        MLX5_SET(set_action_in, action, offset, 0);
                        /* length is num of bits to be written, zero means length of 32 */
                        MLX5_SET(set_action_in, action, length, field_bsize);
                }

                if (field_bsize == 32)
                        MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
                else if (field_bsize == 16)
                        MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
                else if (field_bsize == 8)
                        MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);

                action += action_size;
                nactions++;
        }

        parse_attr->num_mod_hdr_actions = nactions;
        return 0;
}

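/* Size the modify-header action buffer from the device cap for the
 * target namespace (FDB or NIC RX), bounded by the worst case the pedit
 * keys can expand to.
 */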
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
                                 const struct tc_action *a, int namespace,
                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        int nkeys, action_size, max_actions;

        nkeys = tcf_pedit_nkeys(a);
        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

        if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
                max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
        else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
                max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

        /* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
        max_actions = min(max_actions, nkeys * 16);

        parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
        if (!parse_attr->mod_hdr_actions)
                return -ENOMEM;

        parse_attr->num_mod_hdr_actions = max_actions;
        return 0;
}

static const struct pedit_headers zero_masks = {};

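/* Convert a TC pedit action into mlx5 modify-header actions: collect the
 * extended pedit keys per command (set/add), allocate the HW action
 * buffer, translate the accumulated fields, and reject any leftover mask
 * bits that map to no supported HW field.
 */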
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                                 const struct tc_action *a, int namespace,
                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
        int nkeys, i, err = -EOPNOTSUPP;
        u32 mask, val, offset;
        u8 cmd, htype;

        nkeys = tcf_pedit_nkeys(a);

        memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
        memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

        for (i = 0; i < nkeys; i++) {
                htype = tcf_pedit_htype(a, i);
                cmd = tcf_pedit_cmd(a, i);
                err = -EOPNOTSUPP; /* can't be all optimistic */

                if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
                        printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
                        goto out_err;
                }

                if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
                        printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
                        goto out_err;
                }

                mask = tcf_pedit_mask(a, i);
                val = tcf_pedit_val(a, i);
                offset = tcf_pedit_offset(a, i);

                err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
                if (err)
                        goto out_err;
        }

        err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
        if (err)
                goto out_err;

        err = offload_pedit_fields(masks, vals, parse_attr);
        if (err < 0)
                goto out_dealloc_parsed_actions;

        for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
                cmd_masks = &masks[cmd];
                if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
                        printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
                               cmd);
                        print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
                                       16, 1, cmd_masks, sizeof(zero_masks), true);
                        err = -EOPNOTSUPP;
                        goto out_dealloc_parsed_actions;
                }
        }

        return 0;

out_dealloc_parsed_actions:
        kfree(parse_attr->mod_hdr_actions);
out_err:
        return err;
}

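/* The device recalculates checksums as a side effect of header rewrite,
 * so a TC csum action is accepted only alongside a pedit action and only
 * for the IPv4/TCP/UDP header flags.
 */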
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
        u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
                         TCA_CSUM_UPDATE_FLAG_UDP;

        /* The HW recalcs checksums only if re-writing headers */
        if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
                netdev_warn(priv->netdev,
                            "TC csum action is only offloaded with pedit\n");
                return false;
        }

        if (update_flags & ~prot_flags) {
                netdev_warn(priv->netdev,
                            "can't offload TC csum action for some header/s - flags %#x\n",
                            update_flags);
                return false;
        }

        return true;
}

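/* Parse the TC actions of a NIC flow into mlx5 flow context actions:
 * gact drop (with a flow counter when the device supports one), pedit
 * header rewrite with an optional csum action, and skbedit mark, which
 * is carried in the rule's flow tag.
 */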
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
                                struct mlx5e_tc_flow *flow)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        const struct tc_action *a;
        LIST_HEAD(actions);
        int err;

        if (tc_no_actions(exts))
                return -EINVAL;

        attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        attr->action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (attr->action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_pedit(a)) {
                        err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
                                                    parse_attr);
                        if (err)
                                return err;

                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                if (is_tcf_csum(a)) {
                        if (csum_offload_supported(priv, attr->action,
                                                   tcf_csum_update_flags(a)))
                                continue;

                        return -EOPNOTSUPP;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        attr->flow_tag = mark;
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
                                 struct ip_tunnel_key *b)
{
        return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
        return jhash(key, sizeof(*key), 0);
}

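/* Resolve the route and neighbour for an IPv4 tunnel destination; if the
 * egress device is not on the same HW eswitch, fall back to the uplink
 * representor. An IPv6 counterpart follows below.
 */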
1215 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
1216                                    struct net_device *mirred_dev,
1217                                    struct net_device **out_dev,
1218                                    struct flowi4 *fl4,
1219                                    struct neighbour **out_n,
1220                                    int *out_ttl)
1221 {
1222         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1223         struct rtable *rt;
1224         struct neighbour *n = NULL;
1225
1226 #if IS_ENABLED(CONFIG_INET)
1227         int ret;
1228
1229         rt = ip_route_output_key(dev_net(mirred_dev), fl4);
1230         ret = PTR_ERR_OR_ZERO(rt);
1231         if (ret)
1232                 return ret;
1233 #else
1234         return -EOPNOTSUPP;
1235 #endif
1236         /* if the egress device isn't on the same HW e-switch, we use the uplink */
1237         if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
1238                 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
1239         else
1240                 *out_dev = rt->dst.dev;
1241
1242         *out_ttl = ip4_dst_hoplimit(&rt->dst);
1243         n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
1244         ip_rt_put(rt);
1245         if (!n)
1246                 return -ENOMEM;
1247
1248         *out_n = n;
1249         return 0;
1250 }
1251
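/* IPv6 counterpart of mlx5e_route_lookup_ipv4(). */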
1252 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
1253                                    struct net_device *mirred_dev,
1254                                    struct net_device **out_dev,
1255                                    struct flowi6 *fl6,
1256                                    struct neighbour **out_n,
1257                                    int *out_ttl)
1258 {
1259         struct neighbour *n = NULL;
1260         struct dst_entry *dst;
1261
1262 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
1263         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1264         int ret;
1265
1266         dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
1267         ret = dst->error;
1268         if (ret) {
1269                 dst_release(dst);
1270                 return ret;
1271         }
1272
1273         *out_ttl = ip6_dst_hoplimit(dst);
1274
1275         /* if the egress device isn't on the same HW e-switch, we use the uplink */
1276         if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
1277                 *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
1278         else
1279                 *out_dev = dst->dev;
1280 #else
1281         return -EOPNOTSUPP;
1282 #endif
1283
1284         n = dst_neigh_lookup(dst, &fl6->daddr);
1285         dst_release(dst);
1286         if (!n)
1287                 return -ENOMEM;
1288
1289         *out_n = n;
1290         return 0;
1291 }
1292
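/* Write a static ETH/IPv4/UDP/VXLAN encapsulation header into @buf.
 * Per-packet fields (IPv4 total length and checksum, UDP source port,
 * length and checksum) are left zeroed here.
 */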
1293 static void gen_vxlan_header_ipv4(struct net_device *out_dev,
1294                                   char buf[], int encap_size,
1295                                   unsigned char h_dest[ETH_ALEN],
1296                                   int ttl,
1297                                   __be32 daddr,
1298                                   __be32 saddr,
1299                                   __be16 udp_dst_port,
1300                                   __be32 vx_vni)
1301 {
1302         struct ethhdr *eth = (struct ethhdr *)buf;
1303         struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
1304         struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
1305         struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1306
1307         memset(buf, 0, encap_size);
1308
1309         ether_addr_copy(eth->h_dest, h_dest);
1310         ether_addr_copy(eth->h_source, out_dev->dev_addr);
1311         eth->h_proto = htons(ETH_P_IP);
1312
1313         ip->daddr = daddr;
1314         ip->saddr = saddr;
1315
1316         ip->ttl = ttl;
1317         ip->protocol = IPPROTO_UDP;
1318         ip->version = 0x4;
1319         ip->ihl = 0x5;
1320
1321         udp->dest = udp_dst_port;
1322         vxh->vx_flags = VXLAN_HF_VNI;
1323         vxh->vx_vni = vxlan_vni_field(vx_vni);
1324 }
1325
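/* As above, but building an ETH/IPv6/UDP/VXLAN encapsulation header. */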
1326 static void gen_vxlan_header_ipv6(struct net_device *out_dev,
1327                                   char buf[], int encap_size,
1328                                   unsigned char h_dest[ETH_ALEN],
1329                                   int ttl,
1330                                   struct in6_addr *daddr,
1331                                   struct in6_addr *saddr,
1332                                   __be16 udp_dst_port,
1333                                   __be32 vx_vni)
1334 {
1335         struct ethhdr *eth = (struct ethhdr *)buf;
1336         struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
1337         struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
1338         struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
1339
1340         memset(buf, 0, encap_size);
1341
1342         ether_addr_copy(eth->h_dest, h_dest);
1343         ether_addr_copy(eth->h_source, out_dev->dev_addr);
1344         eth->h_proto = htons(ETH_P_IPV6);
1345
1346         ip6_flow_hdr(ip6h, 0, 0);
1347         /* the HW fills in the IPv6 payload length */
1348         ip6h->nexthdr     = IPPROTO_UDP;
1349         ip6h->hop_limit   = ttl;
1350         ip6h->daddr       = *daddr;
1351         ip6h->saddr       = *saddr;
1352
1353         udp->dest = udp_dst_port;
1354         vxh->vx_flags = VXLAN_HF_VNI;
1355         vxh->vx_vni = vxlan_vni_field(vx_vni);
1356 }
1357
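/* Build the IPv4 VXLAN encap header for entry @e: resolve the route
 * and neighbour, attach the entry to the egress representor and, once
 * the neighbour is valid, program the header into HW. Returns -EAGAIN
 * (after triggering neighbour resolution) when the neighbour is not
 * valid yet.
 */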
1358 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
1359                                           struct net_device *mirred_dev,
1360                                           struct mlx5e_encap_entry *e)
1361 {
1362         int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
1363         int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
1364         struct ip_tunnel_key *tun_key = &e->tun_info.key;
1365         struct net_device *out_dev;
1366         struct neighbour *n = NULL;
1367         struct flowi4 fl4 = {};
1368         char *encap_header;
1369         int ttl, err;
1370         u8 nud_state;
1371
1372         if (max_encap_size < ipv4_encap_size) {
1373                 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
1374                                ipv4_encap_size, max_encap_size);
1375                 return -EOPNOTSUPP;
1376         }
1377
1378         encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
1379         if (!encap_header)
1380                 return -ENOMEM;
1381
1382         switch (e->tunnel_type) {
1383         case MLX5_HEADER_TYPE_VXLAN:
1384                 fl4.flowi4_proto = IPPROTO_UDP;
1385                 fl4.fl4_dport = tun_key->tp_dst;
1386                 break;
1387         default:
1388                 err = -EOPNOTSUPP;
1389                 goto out;
1390         }
1391         fl4.flowi4_tos = tun_key->tos;
1392         fl4.daddr = tun_key->u.ipv4.dst;
1393         fl4.saddr = tun_key->u.ipv4.src;
1394
1395         err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
1396                                       &fl4, &n, &ttl);
1397         if (err)
1398                 goto out;
1399
1400         /* used by mlx5e_detach_encap to look up a neigh entry in the
1401          * neigh hash table when a user deletes a rule
1402          */
1403         e->m_neigh.dev = n->dev;
1404         e->m_neigh.family = n->ops->family;
1405         memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
1406         e->out_dev = out_dev;
1407
1408         /* It's important to add the neigh to the hash table before checking
1409          * its validity state, so that if we get a notification when the
1410          * neigh changes its validity state, we will find the relevant
1411          * neigh in the hash.
1412          */
1413         err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1414         if (err)
1415                 goto out;
1416
1417         read_lock_bh(&n->lock);
1418         nud_state = n->nud_state;
1419         ether_addr_copy(e->h_dest, n->ha);
1420         read_unlock_bh(&n->lock);
1421
1422         switch (e->tunnel_type) {
1423         case MLX5_HEADER_TYPE_VXLAN:
1424                 gen_vxlan_header_ipv4(out_dev, encap_header,
1425                                       ipv4_encap_size, e->h_dest, ttl,
1426                                       fl4.daddr,
1427                                       fl4.saddr, tun_key->tp_dst,
1428                                       tunnel_id_to_key32(tun_key->tun_id));
1429                 break;
1430         default:
1431                 err = -EOPNOTSUPP;
1432                 goto destroy_neigh_entry;
1433         }
1434         e->encap_size = ipv4_encap_size;
1435         e->encap_header = encap_header;
1436
1437         if (!(nud_state & NUD_VALID)) {
1438                 neigh_event_send(n, NULL);
1439                 neigh_release(n);
1440                 return -EAGAIN;
1441         }
1442
1443         err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
1444                                ipv4_encap_size, encap_header, &e->encap_id);
1445         if (err)
1446                 goto destroy_neigh_entry;
1447
1448         e->flags |= MLX5_ENCAP_ENTRY_VALID;
1449         mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
1450         neigh_release(n);
1451         return err;
1452
1453 destroy_neigh_entry:
1454         mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1455 out:
1456         kfree(encap_header);
1457         if (n)
1458                 neigh_release(n);
1459         return err;
1460 }
1461
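/* IPv6 counterpart of mlx5e_create_encap_header_ipv4(). */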
1462 static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
1463                                           struct net_device *mirred_dev,
1464                                           struct mlx5e_encap_entry *e)
1465 {
1466         int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
1467         int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
1468         struct ip_tunnel_key *tun_key = &e->tun_info.key;
1469         struct net_device *out_dev;
1470         struct neighbour *n = NULL;
1471         struct flowi6 fl6 = {};
1472         char *encap_header;
1473         int err, ttl = 0;
1474         u8 nud_state;
1475
1476         if (max_encap_size < ipv6_encap_size) {
1477                 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
1478                                ipv6_encap_size, max_encap_size);
1479                 return -EOPNOTSUPP;
1480         }
1481
1482         encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
1483         if (!encap_header)
1484                 return -ENOMEM;
1485
1486         switch (e->tunnel_type) {
1487         case MLX5_HEADER_TYPE_VXLAN:
1488                 fl6.flowi6_proto = IPPROTO_UDP;
1489                 fl6.fl6_dport = tun_key->tp_dst;
1490                 break;
1491         default:
1492                 err = -EOPNOTSUPP;
1493                 goto out;
1494         }
1495
1496         fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
1497         fl6.daddr = tun_key->u.ipv6.dst;
1498         fl6.saddr = tun_key->u.ipv6.src;
1499
1500         err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
1501                                       &fl6, &n, &ttl);
1502         if (err)
1503                 goto out;
1504
1505         /* used by mlx5e_detach_encap to look up a neigh entry in the
1506          * neigh hash table when a user deletes a rule
1507          */
1508         e->m_neigh.dev = n->dev;
1509         e->m_neigh.family = n->ops->family;
1510         memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
1511         e->out_dev = out_dev;
1512
1513         /* It's important to add the neigh to the hash table before checking
1514          * its validity state, so that if we get a notification when the
1515          * neigh changes its validity state, we will find the relevant
1516          * neigh in the hash.
1517          */
1518         err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
1519         if (err)
1520                 goto out;
1521
1522         read_lock_bh(&n->lock);
1523         nud_state = n->nud_state;
1524         ether_addr_copy(e->h_dest, n->ha);
1525         read_unlock_bh(&n->lock);
1526
1527         switch (e->tunnel_type) {
1528         case MLX5_HEADER_TYPE_VXLAN:
1529                 gen_vxlan_header_ipv6(out_dev, encap_header,
1530                                       ipv6_encap_size, e->h_dest, ttl,
1531                                       &fl6.daddr,
1532                                       &fl6.saddr, tun_key->tp_dst,
1533                                       tunnel_id_to_key32(tun_key->tun_id));
1534                 break;
1535         default:
1536                 err = -EOPNOTSUPP;
1537                 goto destroy_neigh_entry;
1538         }
1539
1540         e->encap_size = ipv6_encap_size;
1541         e->encap_header = encap_header;
1542
1543         if (!(nud_state & NUD_VALID)) {
1544                 neigh_event_send(n, NULL);
1545                 neigh_release(n);
1546                 return -EAGAIN;
1547         }
1548
1549         err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
1550                                ipv6_encap_size, encap_header, &e->encap_id);
1551         if (err)
1552                 goto destroy_neigh_entry;
1553
1554         e->flags |= MLX5_ENCAP_ENTRY_VALID;
1555         mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
1556         neigh_release(n);
1557         return err;
1558
1559 destroy_neigh_entry:
1560         mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1561 out:
1562         kfree(encap_header);
1563         if (n)
1564                 neigh_release(n);
1565         return err;
1566 }
1567
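/* Attach @flow to an encap entry, reusing an existing entry from
 * esw->offloads.encap_tbl when one matches the tunnel key, otherwise
 * allocating a new entry and building its encap header. The flow is
 * linked into the entry's flow list; attr->encap_id is only set once
 * the entry is valid in HW.
 */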
1568 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
1569                               struct ip_tunnel_info *tun_info,
1570                               struct net_device *mirred_dev,
1571                               struct net_device **encap_dev,
1572                               struct mlx5e_tc_flow *flow)
1573 {
1574         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1575         struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
1576         unsigned short family = ip_tunnel_info_af(tun_info);
1577         struct mlx5e_priv *up_priv = netdev_priv(up_dev);
1578         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1579         struct ip_tunnel_key *key = &tun_info->key;
1580         struct mlx5e_encap_entry *e;
1581         int tunnel_type, err = 0;
1582         uintptr_t hash_key;
1583         bool found = false;
1584
1585         /* udp dst port must be set */
1586         if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
1587                 goto vxlan_encap_offload_err;
1588
1589         /* setting udp src port isn't supported */
1590         if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
1591 vxlan_encap_offload_err:
1592                 netdev_warn(priv->netdev,
1593                             "udp dst port must be set and udp src port must not be set\n");
1594                 return -EOPNOTSUPP;
1595         }
1596
1597         if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
1598             MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
1599                 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
1600         } else {
1601                 netdev_warn(priv->netdev,
1602                             "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
1603                 return -EOPNOTSUPP;
1604         }
1605
1606         hash_key = hash_encap_info(key);
1607
1608         hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
1609                                    encap_hlist, hash_key) {
1610                 if (!cmp_encap_info(&e->tun_info.key, key)) {
1611                         found = true;
1612                         break;
1613                 }
1614         }
1615
1616         if (found)
1617                 goto attach_flow;
1618
1619         e = kzalloc(sizeof(*e), GFP_KERNEL);
1620         if (!e)
1621                 return -ENOMEM;
1622
1623         e->tun_info = *tun_info;
1624         e->tunnel_type = tunnel_type;
1625         INIT_LIST_HEAD(&e->flows);
1626
1627         if (family == AF_INET)
1628                 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
1629         else if (family == AF_INET6)
1630                 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
1631
1632         if (err && err != -EAGAIN)
1633                 goto out_err;
1634
1635         hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
1636
1637 attach_flow:
1638         list_add(&flow->encap, &e->flows);
1639         *encap_dev = e->out_dev;
1640         if (e->flags & MLX5_ENCAP_ENTRY_VALID)
1641                 attr->encap_id = e->encap_id;
1642
1643         return err;
1644
1645 out_err:
1646         kfree(e);
1647         return err;
1648 }
1649
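/* Translate the TC actions of an e-switch (FDB) rule - drop, pedit,
 * csum, mirred redirect (with or without tunnel encap), tunnel
 * set/release and vlan push/pop - into flow->esw_attr.
 */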
1650 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1651                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
1652                                 struct mlx5e_tc_flow *flow)
1653 {
1654         struct mlx5_esw_flow_attr *attr = flow->esw_attr;
1655         struct mlx5e_rep_priv *rpriv = priv->ppriv;
1656         struct ip_tunnel_info *info = NULL;
1657         const struct tc_action *a;
1658         LIST_HEAD(actions);
1659         bool encap = false;
1660         int err = 0;
1661
1662         if (tc_no_actions(exts))
1663                 return -EINVAL;
1664
1665         memset(attr, 0, sizeof(*attr));
1666         attr->in_rep = rpriv->rep;
1667
1668         tcf_exts_to_list(exts, &actions);
1669         list_for_each_entry(a, &actions, list) {
1670                 if (is_tcf_gact_shot(a)) {
1671                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
1672                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
1673                         continue;
1674                 }
1675
1676                 if (is_tcf_pedit(a)) {
1677                         err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
1678                                                     parse_attr);
1679                         if (err)
1680                                 return err;
1681
1682                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
1683                         continue;
1684                 }
1685
1686                 if (is_tcf_csum(a)) {
1687                         if (csum_offload_supported(priv, attr->action,
1688                                                    tcf_csum_update_flags(a)))
1689                                 continue;
1690
1691                         return -EOPNOTSUPP;
1692                 }
1693
1694                 if (is_tcf_mirred_egress_redirect(a)) {
1695                         int ifindex = tcf_mirred_ifindex(a);
1696                         struct net_device *out_dev, *encap_dev = NULL;
1697                         struct mlx5e_priv *out_priv;
1698
1699                         out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
                        /* __dev_get_by_index() doesn't hold a reference
                         * and may return NULL if the mirred device is gone
                         */
                        if (!out_dev)
                                return -ENODEV;
1700
1701                         if (switchdev_port_same_parent_id(priv->netdev,
1702                                                           out_dev)) {
1703                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1704                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
1705                                 out_priv = netdev_priv(out_dev);
1706                                 rpriv = out_priv->ppriv;
1707                                 attr->out_rep = rpriv->rep;
1708                         } else if (encap) {
1709                                 err = mlx5e_attach_encap(priv, info,
1710                                                          out_dev, &encap_dev, flow);
1711                                 if (err && err != -EAGAIN)
1712                                         return err;
1713                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
1714                                         MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
1715                                         MLX5_FLOW_CONTEXT_ACTION_COUNT;
1716                                 out_priv = netdev_priv(encap_dev);
1717                                 rpriv = out_priv->ppriv;
1718                                 attr->out_rep = rpriv->rep;
1719                                 attr->parse_attr = parse_attr;
1720                         } else {
1721                                 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
1722                                        priv->netdev->name, out_dev->name);
1723                                 return -EINVAL;
1724                         }
1725                         continue;
1726                 }
1727
1728                 if (is_tcf_tunnel_set(a)) {
1729                         info = tcf_tunnel_info(a);
1730                         if (info)
1731                                 encap = true;
1732                         else
1733                                 return -EOPNOTSUPP;
1734                         continue;
1735                 }
1736
1737                 if (is_tcf_vlan(a)) {
1738                         if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
1739                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
1740                         } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
1741                                 if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
1742                                         return -EOPNOTSUPP;
1743
1744                                 attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
1745                                 attr->vlan = tcf_vlan_push_vid(a);
1746                         } else { /* action is TCA_VLAN_ACT_MODIFY */
1747                                 return -EOPNOTSUPP;
1748                         }
1749                         continue;
1750                 }
1751
1752                 if (is_tcf_tunnel_release(a)) {
1753                         attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
1754                         continue;
1755                 }
1756
1757                 return -EINVAL;
1758         }
1759         return err;
1760 }
1761
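/* Entry point for offloading a flower classifier rule: parse the match
 * spec and actions, add the rule to HW (FDB rule when the e-switch is
 * in offloads mode, NIC rule otherwise) and track it in tc->ht keyed
 * by the flower cookie. A flow whose encap neighbour is not resolved
 * yet (-EAGAIN) is kept in the table so it can be offloaded later.
 */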
1762 int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
1763                            struct tc_cls_flower_offload *f)
1764 {
1765         struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1766         struct mlx5e_tc_flow_parse_attr *parse_attr;
1767         struct mlx5e_tc_table *tc = &priv->fs.tc;
1768         struct mlx5e_tc_flow *flow;
1769         int attr_size, err = 0;
1770         u8 flow_flags = 0;
1771
1772         if (esw && esw->mode == SRIOV_OFFLOADS) {
1773                 flow_flags = MLX5E_TC_FLOW_ESWITCH;
1774                 attr_size  = sizeof(struct mlx5_esw_flow_attr);
1775         } else {
1776                 flow_flags = MLX5E_TC_FLOW_NIC;
1777                 attr_size  = sizeof(struct mlx5_nic_flow_attr);
1778         }
1779
1780         flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
1781         parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
1782         if (!parse_attr || !flow) {
1783                 err = -ENOMEM;
1784                 goto err_free;
1785         }
1786
1787         flow->cookie = f->cookie;
1788         flow->flags = flow_flags;
1789
1790         err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
1791         if (err < 0)
1792                 goto err_free;
1793
1794         if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
1795                 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
1796                 if (err < 0)
1797                         goto err_handle_encap_flow;
1798                 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
1799         } else {
1800                 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
1801                 if (err < 0)
1802                         goto err_free;
1803                 flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
1804         }
1805
1806         if (IS_ERR(flow->rule)) {
1807                 err = PTR_ERR(flow->rule);
1808                 goto err_free;
1809         }
1810
1811         flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
1812         err = rhashtable_insert_fast(&tc->ht, &flow->node,
1813                                      tc->ht_params);
1814         if (err)
1815                 goto err_del_rule;
1816
1817         if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
1818             !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
1819                 kvfree(parse_attr);
1820         return err;
1821
1822 err_del_rule:
1823         mlx5e_tc_del_flow(priv, flow);
1824
1825 err_handle_encap_flow:
1826         if (err == -EAGAIN) {
1827                 err = rhashtable_insert_fast(&tc->ht, &flow->node,
1828                                              tc->ht_params);
1829                 if (err)
1830                         mlx5e_tc_del_flow(priv, flow);
1831                 else
1832                         return 0;
1833         }
1834
1835 err_free:
1836         kvfree(parse_attr);
1837         kfree(flow);
1838         return err;
1839 }
1840
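/* Remove an offloaded flower rule from HW and from tc->ht, and free it. */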
1841 int mlx5e_delete_flower(struct mlx5e_priv *priv,
1842                         struct tc_cls_flower_offload *f)
1843 {
1844         struct mlx5e_tc_flow *flow;
1845         struct mlx5e_tc_table *tc = &priv->fs.tc;
1846
1847         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1848                                       tc->ht_params);
1849         if (!flow)
1850                 return -EINVAL;
1851
1852         rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);
1853
1854         mlx5e_tc_del_flow(priv, flow);
1855
1856         kfree(flow);
1857
1858         return 0;
1859 }
1860
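/* Fold the cached HW counter of an offloaded flower rule back into the
 * stats of its TC actions.
 */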
1861 int mlx5e_stats_flower(struct mlx5e_priv *priv,
1862                        struct tc_cls_flower_offload *f)
1863 {
1864         struct mlx5e_tc_table *tc = &priv->fs.tc;
1865         struct mlx5e_tc_flow *flow;
1866         struct tc_action *a;
1867         struct mlx5_fc *counter;
1868         LIST_HEAD(actions);
1869         u64 bytes;
1870         u64 packets;
1871         u64 lastuse;
1872
1873         flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
1874                                       tc->ht_params);
1875         if (!flow)
1876                 return -EINVAL;
1877
1878         if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
1879                 return 0;
1880
1881         counter = mlx5_flow_rule_counter(flow->rule);
1882         if (!counter)
1883                 return 0;
1884
1885         mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
1886
1887         preempt_disable();
1888
1889         tcf_exts_to_list(f->exts, &actions);
1890         list_for_each_entry(a, &actions, list)
1891                 tcf_action_stats_update(a, bytes, packets, lastuse);
1892
1893         preempt_enable();
1894
1895         return 0;
1896 }
1897
1898 static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
1899         .head_offset = offsetof(struct mlx5e_tc_flow, node),
1900         .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
1901         .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
1902         .automatic_shrinking = true,
1903 };
1904
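/* Initialize the hashtable used to track offloaded TC flows. */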
1905 int mlx5e_tc_init(struct mlx5e_priv *priv)
1906 {
1907         struct mlx5e_tc_table *tc = &priv->fs.tc;
1908
1909         tc->ht_params = mlx5e_tc_flow_ht_params;
1910         return rhashtable_init(&tc->ht, &tc->ht_params);
1911 }
1912
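/* rhashtable_free_and_destroy() callback: remove one flow from HW. */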
1913 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
1914 {
1915         struct mlx5e_tc_flow *flow = ptr;
1916         struct mlx5e_priv *priv = arg;
1917
1918         mlx5e_tc_del_flow(priv, flow);
1919         kfree(flow);
1920 }
1921
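/* Remove all offloaded flows and destroy the TC flow table, if any. */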
1922 void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
1923 {
1924         struct mlx5e_tc_table *tc = &priv->fs.tc;
1925
1926         rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);
1927
1928         if (!IS_ERR_OR_NULL(tc->t)) {
1929                 mlx5_destroy_flow_table(tc->t);
1930                 tc->t = NULL;
1931         }
1932 }