/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        struct mlx5_flow_handle *rule;
        struct list_head        encap; /* flows sharing the same encap */
        struct mlx5_esw_flow_attr *attr;
};

enum {
        MLX5_HEADER_TYPE_VXLAN = 0x0,
        MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

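/* Add a flow handled entirely in NIC RX steering: create the TC flow
 * table on first use, then install the rule with the requested action
 * (forward to the vlan table, or drop, optionally with a counter) and
 * flow tag.
 */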
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_flow_act flow_act = {
                .action = action,
                .flow_tag = flow_tag,
                .encap_id = 0,
        };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

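/* Add a flow to the e-switch FDB offload tables; any vlan push/pop
 * state is set up on the e-switch before the rule is installed.
 */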
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int err;

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                return ERR_PTR(err);

        return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

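/* Remove a flow from the list of flows sharing its encap entry. The
 * last flow to go also releases the entry: the encap HW id and the
 * cached neighbour are freed along with it.
 */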
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->encap.next;

        list_del(&flow->encap);
        if (list_empty(next)) {
                struct mlx5_encap_entry *e;

                e = list_entry(next, struct mlx5_encap_entry, flows);
                if (e->n) {
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
                        neigh_release(e->n);
                }
                hlist_del_rcu(&e->encap_hlist);
                kfree(e);
        }
}

/* We also get here when installing the rule in the FW failed. In that
 * case the flow rule itself might not exist, but offloading state
 * related to the actions still has to be cleaned up.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_fc *counter = NULL;

        if (!IS_ERR(flow->rule)) {
                counter = mlx5_flow_rule_counter(flow->rule);
                mlx5_del_flow_rules(flow->rule);
                mlx5_fc_destroy(priv->mdev, counter);
        }

        if (esw && esw->mode == SRIOV_OFFLOADS) {
                mlx5_eswitch_del_vlan_action(esw, flow->attr);
                if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                        mlx5e_detach_encap(priv, flow);
        }

        if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

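/* Fill the match spec for a vxlan tunnel: the outer IP protocol must be
 * UDP and, if the filter matches on the tunnel key id, the VNI is
 * matched in the misc parameters.
 */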
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
}

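/* Parse the tunnel (outer header) match of a decap flow into the flow
 * spec. Only vxlan over IPv4 is supported; the filter must match on the
 * full UDP destination port of a port known to the vxlan offload.
 */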
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);

        struct flow_dissector_key_control *enc_control =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                          f->key);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);

                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        goto vxlan_match_offload_err;

                if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                        parse_vxlan_attr(spec, f);
                } else {
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n",
                                    be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
                }

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_sport, ntohs(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
vxlan_match_offload_err:
                netdev_warn(priv->netdev,
                            "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
                return -EOPNOTSUPP;
        }

        if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->mask);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(key->src));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
        }

        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
         */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dmac_47_16), priv->netdev->dev_addr);

        /* let software handle IP fragments */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

        return 0;
}

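/* Translate a flower match into a mlx5 flow spec, filling both the
 * match criteria (masks) and the match values, and report through
 * min_inline the minimal inline mode the match requires.
 */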
static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              u8 *min_inline)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        *min_inline = MLX5_INLINE_MODE_L2;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if ((dissector_uses_key(f->dissector,
                                FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
            dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  f->key);
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
                case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
                        netdev_warn(priv->netdev,
                                    "IPv6 tunnel decap offload isn't supported\n");
                        /* fall through */
                default:
                        return -EOPNOTSUPP;
                }

                /* In a decap flow, the header pointers should point to
                 * the inner headers; the outer headers were already set
                 * by parse_tunnel_attr.
                 */
                headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                         inner_headers);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);

                struct flow_dissector_key_control *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->mask);
                addr_type = key->addr_type;

                if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 key->flags & FLOW_DIS_IS_FRAGMENT);

                        /* the HW doesn't need L3 inline to match on frag=no */
                        if (key->flags & FLOW_DIS_IS_FRAGMENT)
                                *min_inline = MLX5_INLINE_MODE_IP;
                }
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);

                if (mask->ip_proto)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id || mask->vlan_priority) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
                                 mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
                                 key->vlan_id);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
                                 mask->vlan_priority);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
                                 key->vlan_priority);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));

                if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
                    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_TCP_UDP;
        }

        return 0;
}

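/* Parse the match part of a flower filter; for e-switch VF rep flows,
 * also verify that the configured e-switch inline mode is sufficient
 * for the headers the filter matches on.
 */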
static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5_eswitch_rep *rep = priv->ppriv;
        u8 min_inline;
        int err;

        err = __parse_cls_flower(priv, spec, f, &min_inline);

        if (!err && esw->mode == SRIOV_OFFLOADS &&
            rep->vport != FDB_UPLINK_VPORT) {
                if (min_inline > esw->offloads.inline_mode) {
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    min_inline, esw->offloads.inline_mode);
                        return -EOPNOTSUPP;
                }
        }

        return err;
}

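/* Parse the actions of a NIC flow. A rule carries a single action:
 * either gact drop (with a HW counter when the device supports one) or
 * skbedit mark, which is mapped to a flow tag reported back in the CQE.
 */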
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

static inline int cmp_encap_info(struct mlx5_encap_info *a,
                                 struct mlx5_encap_info *b)
{
        return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct mlx5_encap_info *info)
{
        return jhash(info, sizeof(*info), 0);
}

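/* Resolve the IPv4 route and neighbour for the tunnel destination.
 * Offloading is only possible when the route egress device sits on the
 * same HW e-switch as our netdev.
 */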
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   __be32 *saddr,
                                   int *out_ttl)
{
        struct rtable *rt;
        struct neighbour *n = NULL;
        int ttl;

#if IS_ENABLED(CONFIG_INET)
        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        if (IS_ERR(rt))
                return PTR_ERR(rt);
#else
        return -EOPNOTSUPP;
#endif

        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
                pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
                ip_rt_put(rt);
                return -EOPNOTSUPP;
        }

        ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        /* read the egress device before the route reference is dropped */
        *out_dev = rt->dst.dev;
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        *saddr = fl4->saddr;
        *out_ttl = ttl;

        return 0;
}

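/* Build the static part of the vxlan-over-IPv4 encap header into buf:
 * Ethernet, IPv4 and UDP headers followed by the vxlan header carrying
 * the VNI. Returns the total header size; per-packet fields such as the
 * IP and UDP lengths are left zero here.
 */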
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
                                 char buf[],
                                 unsigned char h_dest[ETH_ALEN],
                                 int ttl,
                                 __be32 daddr,
                                 __be32 saddr,
                                 __be16 udp_dst_port,
                                 __be32 vx_vni)
{
        int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
        struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

        memset(buf, 0, encap_size);

        ether_addr_copy(eth->h_dest, h_dest);
        ether_addr_copy(eth->h_source, out_dev->dev_addr);
        eth->h_proto = htons(ETH_P_IP);

        ip->daddr = daddr;
        ip->saddr = saddr;

        ip->ttl = ttl;
        ip->protocol = IPPROTO_UDP;
        ip->version = 0x4;
        ip->ihl = 0x5;

        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);

        return encap_size;
}

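/* Resolve the route and neighbour for the tunnel destination, build the
 * encap header and allocate a HW encap id for it. Fails when the
 * neighbour entry is not valid, since its MAC address is needed for the
 * header.
 */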
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device *mirred_dev,
                                          struct mlx5_encap_entry *e,
                                          struct net_device **out_dev)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        char *encap_header;
        int encap_size;
        __be32 saddr = 0;
        int ttl = 0;
        int err;

        encap_header = kzalloc(max_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.fl4_dport = e->tun_info.tp_dst;
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }
        fl4.daddr = e->tun_info.daddr;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
                                      &fl4, &n, &saddr, &ttl);
        if (err)
                goto out;

        e->n = n;
        e->out_dev = *out_dev;

        if (!(n->nud_state & NUD_VALID)) {
                pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
                err = -EOPNOTSUPP;
                goto out;
        }

        neigh_ha_snapshot(e->h_dest, n, *out_dev);

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
                                                   e->h_dest, ttl,
                                                   e->tun_info.daddr,
                                                   saddr, e->tun_info.tp_dst,
                                                   e->tun_info.tun_id);
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }

        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               encap_size, encap_header, &e->encap_id);
out:
        if (err && n)
                neigh_release(n);
        kfree(encap_header);
        return err;
}

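/* Find or create the encap entry for a tunnel set action. Entries are
 * kept in a hash table on the e-switch, so flows with the same tunnel
 * parameters share a single HW encap id.
 */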
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct ip_tunnel_key *key = &tun_info->key;
        struct mlx5_encap_info info;
        struct mlx5_encap_entry *e;
        struct net_device *out_dev;
        uintptr_t hash_key;
        bool found = false;
        int tunnel_type;
        int err;

        /* udp dst port must be set */
        if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
                goto vxlan_encap_offload_err;

        /* setting udp src port isn't supported */
        if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
                netdev_warn(priv->netdev,
                            "must set udp dst port and not set udp src port\n");
                return -EOPNOTSUPP;
        }

        if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                info.tp_dst = key->tp_dst;
                info.tun_id = tunnel_id_to_key32(key->tun_id);
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
                netdev_warn(priv->netdev,
                            "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
                return -EOPNOTSUPP;
        }

        switch (family) {
        case AF_INET:
                info.daddr = key->u.ipv4.dst;
                break;
        case AF_INET6:
                netdev_warn(priv->netdev,
                            "IPv6 tunnel encap offload isn't supported\n");
                /* fall through */
        default:
                return -EOPNOTSUPP;
        }

        hash_key = hash_encap_info(&info);

        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                if (!cmp_encap_info(&e->tun_info, &info)) {
                        found = true;
                        break;
                }
        }

        if (found) {
                attr->encap = e;
                return 0;
        }

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->tun_info = info;
        e->tunnel_type = tunnel_type;
        INIT_LIST_HEAD(&e->flows);

        err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
        if (err)
                goto out_err;

        attr->encap = e;
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

        return err;

out_err:
        kfree(e);
        return err;
}

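/* Parse the actions of an e-switch (FDB) flow: drop, redirect to a port
 * on the same HW e-switch (possibly through a tunnel encap), vlan
 * push/pop, and tunnel decap.
 */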
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow *flow)
{
        struct mlx5_esw_flow_attr *attr = flow->attr;
        struct ip_tunnel_info *info = NULL;
        const struct tc_action *a;
        LIST_HEAD(actions);
        bool encap = false;
        int err;

        if (tc_no_actions(exts))
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        attr->in_rep = priv->ppriv;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);
                        if (!out_dev)
                                return -EINVAL;

                        if (switchdev_port_same_parent_id(priv->netdev,
                                                          out_dev)) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else if (encap) {
                                err = mlx5e_attach_encap(priv, info,
                                                         out_dev, attr);
                                if (err)
                                        return err;
                                list_add(&flow->encap, &attr->encap->flows);
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(attr->encap->out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }
                        continue;
                }

                if (is_tcf_tunnel_set(a)) {
                        info = tcf_tunnel_info(a);
                        if (info)
                                encap = true;
                        else
                                return -EOPNOTSUPP;
                        continue;
                }

                if (is_tcf_vlan(a)) {
                        if (tcf_vlan_action(a) == VLAN_F_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                        } else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;

                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
                        }
                        continue;
                }

                if (is_tcf_tunnel_release(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

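/* Offload a flower classifier instance: parse the match and the
 * actions, install the rule in the NIC or FDB tables, and track the
 * flow in a hash table keyed by the TC cookie.
 */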
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err = 0;
        bool fdb_flow = false;
        u32 flow_tag, action;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        if (esw && esw->mode == SRIOV_OFFLOADS)
                fdb_flow = true;

        if (fdb_flow)
                flow = kzalloc(sizeof(*flow) +
                               sizeof(struct mlx5_esw_flow_attr),
                               GFP_KERNEL);
        else
                flow = kzalloc(sizeof(*flow), GFP_KERNEL);

        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;

        err = parse_cls_flower(priv, spec, f);
        if (err < 0)
                goto err_free;

        if (fdb_flow) {
                flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
                err = parse_tc_fdb_actions(priv, f->exts, flow);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_del_rule;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        goto out;

err_del_rule:
        mlx5e_tc_del_flow(priv, flow);

err_free:
        kfree(flow);
out:
        kvfree(spec);
        return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow);

        kfree(flow);

        return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        return 0;
}

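/* Flows are hashed by the TC filter cookie, which is also what the
 * delete and stats callbacks use to look a flow up.
 */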
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow);
        kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}