/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

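/* One offloaded TC flow, hashed by the flower filter cookie.  @rule is
 * the installed hardware rule; @attr is used only for e-switch (FDB)
 * flows and points into the same allocation as the flow itself.
 */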
struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	struct mlx5_flow_handle *rule;
	struct mlx5_esw_flow_attr *attr;
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

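/* Add a flow to the NIC RX flow table.  The table is created lazily on
 * first use with the size/grouping above, and is torn down again if
 * installing this first rule fails.
 */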
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      u32 action, u32 flow_tag)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = { 0 };
	struct mlx5_flow_act flow_act = {
		.action = action,
		.flow_tag = flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;

	if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

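/* Add a flow to the e-switch FDB (SRIOV offloads mode).  The VLAN
 * push/pop actions are applied on the e-switch first, then the
 * offloaded rule itself is installed.
 */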
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		return ERR_PTR(err);

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

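/* Common teardown for NIC and FDB flows: undo e-switch VLAN actions
 * where relevant, delete the hardware rule, free its counter, and drop
 * the NIC TC table once the last filter is removed.
 */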
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5_flow_handle *rule,
			      struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(rule);

	if (esw && esw->mode == SRIOV_OFFLOADS)
		mlx5_eswitch_del_vlan_action(esw, attr);

	mlx5_del_flow_rules(rule);

	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
}

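/* Translate the flower tunnel key id into a VXLAN VNI match.  As
 * elsewhere in this file, a match is expressed as a mask written to
 * match_criteria (the *_c pointers) plus a value written to
 * match_value (the *_v pointers).
 */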
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

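/* Parse the outer (tunnel) part of a flower match.  Only VXLAN over
 * IPv4 is accepted: the UDP destination port must be fully masked and
 * registered in the driver's VXLAN port table, and matching on the UDP
 * source port is not supported.  As a rough sketch (device names and
 * values hypothetical), a decap rule of this shape could be set up
 * with:
 *
 *   tc filter add dev vxlan0 protocol ip parent ffff: flower \
 *       enc_key_id 100 enc_dst_port 4789 \
 *       action tunnel_key unset \
 *       action mirred egress redirect dev vf0_rep
 */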
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			return -EOPNOTSUPP;

		/* udp src port isn't supported */
		if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
			return -EOPNOTSUPP;

		if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else
			return -EOPNOTSUPP;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

	} else { /* udp dst port must be given */
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));
	}

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

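/* Build an mlx5 flow spec from a flower classifier.  Filters using
 * dissector keys outside the supported set are rejected up front;
 * tunnel keys, when present, are parsed first so that the remaining
 * matches are applied to the inner headers.
 */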
static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flows, the header pointers should point to the
		 * inner headers; the outer headers were already set by
		 * parse_tunnel_attr().
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}
	}

	return 0;
}

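/* Parse TC actions for NIC flows: gact drop (counted when the device
 * supports flow counters) and skbedit mark, which is delivered through
 * the CQE as the flow tag.  Sketches of filters this accepts (device
 * name hypothetical):
 *
 *   tc filter add dev eth0 protocol ip parent ffff: flower \
 *       dst_ip 192.168.0.1 action drop
 *   tc filter add dev eth0 protocol ip parent ffff: flower \
 *       src_ip 192.168.0.2 action skbedit mark 0x1234
 */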
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				u32 *action, u32 *flow_tag)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	*action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (*action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			*action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				*action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			*flow_tag = mark;
			*action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

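/* Parse TC actions for e-switch (FDB) flows: drop, mirred redirect
 * between ports of the same hardware switch, 802.1Q VLAN push/pop, and
 * tunnel decap.  For instance (representor names hypothetical):
 *
 *   tc filter add dev pf0vf0_rep protocol ip parent ffff: flower \
 *       action mirred egress redirect dev pf0vf1_rep
 */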
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5_esw_flow_attr *attr)
{
	const struct tc_action *a;
	LIST_HEAD(actions);

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = priv->ppriv;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (!switchdev_port_same_parent_id(priv->netdev, out_dev)) {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			out_priv = netdev_priv(out_dev);
			attr->out_rep = out_priv->ppriv;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == VLAN_F_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == VLAN_F_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return 0;
}

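/* Add or replace a flower offload.  On replace, the existing flow entry
 * is reused: the new hardware rule is installed first and the old rule
 * is deleted only after the new one is in place.
 */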
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err = 0;
	bool fdb_flow = false;
	u32 flow_tag, action;
	struct mlx5e_tc_flow *flow;
	struct mlx5_flow_spec *spec;
	struct mlx5_flow_handle *old = NULL;
	struct mlx5_esw_flow_attr *old_attr = NULL;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		fdb_flow = true;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (flow) {
		old = flow->rule;
		old_attr = flow->attr;
	} else {
		if (fdb_flow)
			flow = kzalloc(sizeof(*flow) + sizeof(struct mlx5_esw_flow_attr),
				       GFP_KERNEL);
		else
			flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	}

	spec = mlx5_vzalloc(sizeof(*spec));
	if (!spec || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;

	err = parse_cls_flower(priv, spec, f);
	if (err < 0)
		goto err_free;

	if (fdb_flow) {
		flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
		err = parse_tc_fdb_actions(priv, f->exts, flow->attr);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (old)
		mlx5e_tc_del_flow(priv, old, old_attr);

	goto out;

err_del_rule:
	mlx5_del_flow_rules(flow->rule);

err_free:
	if (!old)
		kfree(flow);
out:
	kvfree(spec);
	return err;
}

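/* Remove a flower offload: unhash the flow, then release its hardware
 * rule and resources.
 */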
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);

	kfree(flow);

	return 0;
}

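/* Report hardware stats for a flower offload back to TC.  Counter
 * values come from the driver's cached snapshot rather than from a
 * firmware query on every call.
 */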
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	return 0;
}

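/* Offloaded flows are hashed by the u64 flower filter cookie. */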
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow->rule, flow->attr);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}