drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

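/* Map of the 32-bit pedit field locations exposed by tc (header type +
 * offset) onto the corresponding bytes of struct ch_filter_specification.
 * Each entry records the destination member (dmac/smac or one of the NAT
 * rewrite fields) plus the size and offset of the slice being written,
 * so that offload_pedit() below can copy rewritten bits into place.
 */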
static struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
        PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
        PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
        PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
        PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
        PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
        PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
        PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
        PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
        PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
        PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
        PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

        if (new)
                spin_lock_init(&new->lock);
        return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
                                                   unsigned long flower_cookie)
{
        return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
                                      adap->flower_ht_params);
}

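/* Translate the match part of a flower rule into the hardware filter
 * specification: each flow dissector key carried by the rule is copied
 * into the corresponding value/mask pair of struct ch_filter_specification.
 */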
static void cxgb4_process_flow_match(struct net_device *dev,
                                     struct flow_cls_offload *cls,
                                     struct ch_filter_specification *fs)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        u16 addr_type = 0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;
                u16 ethtype_key, ethtype_mask;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);

                if (ethtype_key == ETH_P_ALL) {
                        ethtype_key = 0;
                        ethtype_mask = 0;
                }

                if (ethtype_key == ETH_P_IPV6)
                        fs->type = 1;

                fs->val.ethtype = ethtype_key;
                fs->mask.ethtype = ethtype_mask;
                fs->val.proto = match.key->ip_proto;
                fs->mask.proto = match.mask->ip_proto;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                fs->type = 0;
                memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                fs->type = 1;
                memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
                       sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
                       sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                /* The dissector ports are __be16 while the filter spec
                 * wants host order, so convert with be16_to_cpu() rather
                 * than cpu_to_be16().
                 */
                fs->val.lport = be16_to_cpu(match.key->dst);
                fs->mask.lport = be16_to_cpu(match.mask->dst);
                fs->val.fport = be16_to_cpu(match.key->src);
                fs->mask.fport = be16_to_cpu(match.mask->src);

                /* also initialize nat_lport/fport to same values */
                fs->nat_lport = be16_to_cpu(match.key->dst);
                fs->nat_fport = be16_to_cpu(match.key->src);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                fs->val.tos = match.key->tos;
                fs->mask.tos = match.mask->tos;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                fs->val.vni = be32_to_cpu(match.key->keyid);
                fs->mask.vni = be32_to_cpu(match.mask->keyid);
                if (fs->mask.vni) {
                        fs->val.encap_vld = 1;
                        fs->mask.encap_vld = 1;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;
                u16 vlan_tci, vlan_tci_mask;

                flow_rule_match_vlan(rule, &match);
                vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
                                                 VLAN_PRIO_SHIFT);
                vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
                                                       VLAN_PRIO_SHIFT);
                fs->val.ivlan = vlan_tci;
                fs->mask.ivlan = vlan_tci_mask;

                fs->val.ivlan_vld = 1;
                fs->mask.ivlan_vld = 1;

                /* Chelsio adapters use the ivlan_vld bit to match VLAN
                 * packets as 802.1Q. When a VLAN tag is present, the
                 * ethtype field matches the ethertype of the inner header,
                 * i.e. the header following the VLAN header. So, if TC
                 * supplied an ethtype of 802.1Q, the ivlan_vld set above
                 * already covers it; clear the ethtype value here, else
                 * hardware would try to match 802.1Q against the inner
                 * header's ethertype.
                 */
                if (fs->val.ethtype == ETH_P_8021Q) {
                        fs->val.ethtype = 0;
                        fs->mask.ethtype = 0;
                }
        }

        /* Match only packets coming from the ingress port where this
         * filter will be created.
         */
        fs->val.iport = netdev2pinfo(dev)->port_id;
        fs->mask.iport = ~0;
}

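/* Reject rules that use dissector keys or key combinations the hardware
 * cannot match on; everything accepted here must be translatable by
 * cxgb4_process_flow_match() above.
 */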
static int cxgb4_validate_flow_match(struct net_device *dev,
                                     struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        u16 ethtype_mask = 0;
        u16 ethtype_key = 0;

        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IP))) {
                netdev_warn(dev, "Unsupported key used: 0x%x\n",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                u16 eth_ip_type = ethtype_key & ethtype_mask;
                struct flow_match_ip match;

                if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
                        netdev_err(dev, "IP Key supported only with IPv4/v6\n");
                        return -EINVAL;
                }

                flow_rule_match_ip(rule, &match);
                if (match.mask->ttl) {
                        netdev_warn(dev, "ttl match unsupported for offload\n");
                        return -EOPNOTSUPP;
                }
        }

        return 0;
}

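/* Look up the pedit field descriptor in pedits[] and copy the rewritten
 * bits into the filter specification. In tc pedit semantics a clear mask
 * bit means "rewrite this bit", so the bits to program are val & ~mask.
 */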
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
                          u8 field)
{
        u32 set_val = val & ~mask;
        u32 offset = 0;
        u8 size = 1;
        int i;

        for (i = 0; i < ARRAY_SIZE(pedits); i++) {
                if (pedits[i].field == field) {
                        offset = pedits[i].offset;
                        size = pedits[i].size;
                        break;
                }
        }
        memcpy((u8 *)fs + offset, &set_val, size);
}

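/* Dispatch one 32-bit pedit write to the right filter-spec field based on
 * the header type and offset reported by tc. MAC rewrites set the
 * newdmac/newsmac flags; any IP address or L4 port rewrite switches the
 * filter into NAT_MODE_ALL so the hardware performs the full translation.
 */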
static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                                u32 mask, u32 offset, u8 htype)
{
        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                        fs->newdmac = 1;
                        offload_pedit(fs, val, mask, ETH_DMAC_31_0);
                        break;
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                        if (~mask & PEDIT_ETH_DMAC_MASK)
                                offload_pedit(fs, val, mask, ETH_DMAC_47_32);
                        else
                                offload_pedit(fs, val >> 16, mask >> 16,
                                              ETH_SMAC_15_0);
                        break;
                case PEDIT_ETH_SMAC_47_16:
                        fs->newsmac = 1;
                        offload_pedit(fs, val, mask, ETH_SMAC_47_16);
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                        offload_pedit(fs, val, mask, IP4_SRC);
                        break;
                case PEDIT_IP4_DST:
                        offload_pedit(fs, val, mask, IP4_DST);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                        offload_pedit(fs, val, mask, IP6_SRC_31_0);
                        break;
                case PEDIT_IP6_SRC_63_32:
                        offload_pedit(fs, val, mask, IP6_SRC_63_32);
                        break;
                case PEDIT_IP6_SRC_95_64:
                        offload_pedit(fs, val, mask, IP6_SRC_95_64);
                        break;
                case PEDIT_IP6_SRC_127_96:
                        offload_pedit(fs, val, mask, IP6_SRC_127_96);
                        break;
                case PEDIT_IP6_DST_31_0:
                        offload_pedit(fs, val, mask, IP6_DST_31_0);
                        break;
                case PEDIT_IP6_DST_63_32:
                        offload_pedit(fs, val, mask, IP6_DST_63_32);
                        break;
                case PEDIT_IP6_DST_95_64:
                        offload_pedit(fs, val, mask, IP6_DST_95_64);
                        break;
                case PEDIT_IP6_DST_127_96:
                        offload_pedit(fs, val, mask, IP6_DST_127_96);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                offload_pedit(fs, cpu_to_be32(val) >> 16,
                                              cpu_to_be32(mask) >> 16,
                                              TCP_SPORT);
                        else
                                offload_pedit(fs, cpu_to_be32(val),
                                              cpu_to_be32(mask), TCP_DPORT);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                offload_pedit(fs, cpu_to_be32(val) >> 16,
                                              cpu_to_be32(mask) >> 16,
                                              UDP_SPORT);
                        else
                                offload_pedit(fs, cpu_to_be32(val),
                                              cpu_to_be32(mask), UDP_DPORT);
                }
                fs->nat_mode = NAT_MODE_ALL;
        }
}

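/* Translate the action list of an already-validated flower rule into the
 * filter specification: pass/drop, redirect to an egress port, VLAN
 * pop/push/rewrite, and pedit header rewrites.
 */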
static void cxgb4_process_flow_actions(struct net_device *in,
                                       struct flow_cls_offload *cls,
                                       struct ch_filter_specification *fs)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_action_entry *act;
        int i;

        flow_action_for_each(i, act, &rule->action) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                        fs->action = FILTER_PASS;
                        break;
                case FLOW_ACTION_DROP:
                        fs->action = FILTER_DROP;
                        break;
                case FLOW_ACTION_REDIRECT: {
                        struct net_device *out = act->dev;
                        struct port_info *pi = netdev_priv(out);

                        fs->action = FILTER_SWITCH;
                        fs->eport = pi->port_id;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u8 prio = act->vlan.prio;
                        u16 vid = act->vlan.vid;
                        u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                fs->newvlan |= VLAN_REMOVE;
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                                fs->newvlan |= VLAN_INSERT;
                                fs->vlan = vlan_tci;
                                break;
                        case FLOW_ACTION_VLAN_MANGLE:
                                fs->newvlan |= VLAN_REWRITE;
                                fs->vlan = vlan_tci;
                                break;
                        default:
                                break;
                        }
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        u32 mask, val, offset;
                        u8 htype;

                        htype = act->mangle.htype;
                        mask = act->mangle.mask;
                        val = act->mangle.val;
                        offset = act->mangle.offset;

                        process_pedit_field(fs, val, mask, offset, htype);
                        }
                        break;
                default:
                        break;
                }
        }
}

static bool valid_l4_mask(u32 mask)
{
        u16 hi, lo;

        /* Either the upper 16-bits (SPORT) OR the lower
         * 16-bits (DPORT) can be set, but NOT BOTH.
         */
        hi = (mask >> 16) & 0xFFFF;
        lo = mask & 0xFFFF;

        return !(hi && lo);
}

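/* Check a single pedit action against the header fields the hardware can
 * rewrite; reject anything outside the supported MAC, IPv4/v6 address and
 * TCP/UDP port offsets, and port rewrites that touch both halves of the
 * 32-bit sport/dport word at once.
 */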
static bool valid_pedit_action(struct net_device *dev,
                               const struct flow_action_entry *act)
{
        u32 mask, offset;
        u8 htype;

        htype = act->mangle.htype;
        mask = act->mangle.mask;
        offset = act->mangle.offset;

        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                case PEDIT_ETH_SMAC_47_16:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                case PEDIT_IP4_DST:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                case PEDIT_IP6_SRC_63_32:
                case PEDIT_IP6_SRC_95_64:
                case PEDIT_IP6_SRC_127_96:
                case PEDIT_IP6_DST_31_0:
                case PEDIT_IP6_DST_63_32:
                case PEDIT_IP6_DST_95_64:
                case PEDIT_IP6_DST_127_96:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        default:
                netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
                return false;
        }
        return true;
}

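/* Walk the action list and reject combinations the hardware cannot apply:
 * redirects must target a port of this adapter, VLAN push/rewrite is
 * limited to 802.1Q, pedit actions must pass valid_pedit_action(), and
 * any pedit or VLAN rewrite only makes sense together with an egress
 * redirect.
 */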
static int cxgb4_validate_flow_actions(struct net_device *dev,
                                       struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_action_entry *act;
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
        int i;

        flow_action_for_each(i, act, &rule->action) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                case FLOW_ACTION_DROP:
                        /* Do nothing */
                        break;
                case FLOW_ACTION_REDIRECT: {
                        struct adapter *adap = netdev2adap(dev);
                        struct net_device *n_dev, *target_dev;
                        unsigned int i;
                        bool found = false;

                        target_dev = act->dev;
                        for_each_port(adap, i) {
                                n_dev = adap->port[i];
                                if (target_dev == n_dev) {
                                        found = true;
                                        break;
                                }
                        }

                        /* If interface doesn't belong to our hw, then
                         * the provided output port is not valid
                         */
                        if (!found) {
                                netdev_err(dev, "%s: Out port invalid\n",
                                           __func__);
                                return -EINVAL;
                        }
                        act_redir = true;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u16 proto = be16_to_cpu(act->vlan.proto);

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                        case FLOW_ACTION_VLAN_MANGLE:
                                if (proto != ETH_P_8021Q) {
                                        netdev_err(dev, "%s: Unsupported vlan proto\n",
                                                   __func__);
                                        return -EOPNOTSUPP;
                                }
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported vlan action\n",
                                           __func__);
                                return -EOPNOTSUPP;
                        }
                        act_vlan = true;
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        bool pedit_valid = valid_pedit_action(dev, act);

                        if (!pedit_valid)
                                return -EOPNOTSUPP;
                        act_pedit = true;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported action\n", __func__);
                        return -EOPNOTSUPP;
                }
        }

        if ((act_pedit || act_vlan) && !act_redir) {
                netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
                           __func__);
                return -EINVAL;
        }

        return 0;
}

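/* Offload one flower rule: validate it, translate match and actions into
 * a ch_filter_specification, pick a filter slot (hash or TCAM), program
 * the hardware, and on success track the rule in the flower table keyed
 * by the tc cookie.
 *
 * A rule of roughly this shape (illustrative tc invocation only) would
 * land here:
 *
 *   tc filter add dev <port> ingress protocol ip flower \
 *           dst_ip 10.0.0.1 ip_proto tcp dst_port 80 \
 *           action drop
 */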
int cxgb4_tc_flower_replace(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int fidx;
        int ret;

        if (cxgb4_validate_flow_actions(dev, cls))
                return -EOPNOTSUPP;

        if (cxgb4_validate_flow_match(dev, cls))
                return -EOPNOTSUPP;

        ch_flower = allocate_flower_entry();
        if (!ch_flower) {
                netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
                return -ENOMEM;
        }

        fs = &ch_flower->fs;
        fs->hitcnts = 1;
        cxgb4_process_flow_match(dev, cls, fs);
        cxgb4_process_flow_actions(dev, cls, fs);

        fs->hash = is_filter_exact_match(adap, fs);
        if (fs->hash) {
                fidx = 0;
        } else {
                fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
                if (fidx < 0) {
                        netdev_err(dev, "%s: No fidx for offload.\n", __func__);
                        ret = -ENOMEM;
                        goto free_entry;
                }
        }

        init_completion(&ctx.completion);
        ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
        if (ret) {
                netdev_err(dev, "%s: filter creation err %d\n",
                           __func__, ret);
                goto free_entry;
        }

        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
        if (!ret) {
                ret = -ETIMEDOUT;
                goto free_entry;
        }

        ret = ctx.result;
        /* Check if hw returned error for filter creation */
        if (ret)
                goto free_entry;

        ch_flower->tc_flower_cookie = cls->cookie;
        ch_flower->filter_id = ctx.tid;
        ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret)
                goto del_filter;

        return 0;

del_filter:
        cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
        kfree(ch_flower);
        return ret;
}

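/* Tear down an offloaded rule: look up the entry by tc cookie, remove the
 * hardware filter, then drop it from the flower table and free it after
 * an RCU grace period.
 */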
int cxgb4_tc_flower_destroy(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower)
                return -ENOENT;

        ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
        if (ret)
                goto err;

        ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret) {
                netdev_err(dev, "Flow remove from rhashtable failed\n");
                goto err;
        }
        kfree_rcu(ch_flower, rcu);

err:
        return ret;
}

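/* Periodic worker: walk the flower table and refresh the "last used"
 * timestamp of every entry whose hardware packet counter has advanced
 * since the previous pass. Rescheduled via the stats timer every
 * STATS_CHECK_PERIOD.
 */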
static void ch_flower_stats_handler(struct work_struct *work)
{
        struct adapter *adap = container_of(work, struct adapter,
                                            flower_stats_work);
        struct ch_tc_flower_entry *flower_entry;
        struct ch_tc_flower_stats *ofld_stats;
        struct rhashtable_iter iter;
        u64 packets;
        u64 bytes;
        int ret;

        rhashtable_walk_enter(&adap->flower_tbl, &iter);
        do {
                rhashtable_walk_start(&iter);

                while ((flower_entry = rhashtable_walk_next(&iter)) &&
                       !IS_ERR(flower_entry)) {
                        ret = cxgb4_get_filter_counters(adap->port[0],
                                                        flower_entry->filter_id,
                                                        &packets, &bytes,
                                                        flower_entry->fs.hash);
                        if (!ret) {
                                spin_lock(&flower_entry->lock);
                                ofld_stats = &flower_entry->stats;

                                if (ofld_stats->prev_packet_count != packets) {
                                        ofld_stats->prev_packet_count = packets;
                                        ofld_stats->last_used = jiffies;
                                }
                                spin_unlock(&flower_entry->lock);
                        }
                }

                rhashtable_walk_stop(&iter);

        } while (flower_entry == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
        struct adapter *adap = from_timer(adap, t, flower_stats_timer);

        schedule_work(&adap->flower_stats_work);
}

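/* Report hardware hit counters back to tc for a given rule: deltas since
 * the last query are pushed via flow_stats_update(), and the cached
 * counters are advanced to the values just read.
 */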
int cxgb4_tc_flower_stats(struct net_device *dev,
                          struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_stats *ofld_stats;
        struct ch_tc_flower_entry *ch_flower;
        u64 packets;
        u64 bytes;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower) {
                ret = -ENOENT;
                goto err;
        }

        ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
                                        &packets, &bytes,
                                        ch_flower->fs.hash);
        if (ret < 0)
                goto err;

        spin_lock_bh(&ch_flower->lock);
        ofld_stats = &ch_flower->stats;
        if (ofld_stats->packet_count != packets) {
                if (ofld_stats->prev_packet_count != packets)
                        ofld_stats->last_used = jiffies;
                flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
                                  packets - ofld_stats->packet_count,
                                  ofld_stats->last_used);

                ofld_stats->packet_count = packets;
                ofld_stats->byte_count = bytes;
                ofld_stats->prev_packet_count = packets;
        }
        spin_unlock_bh(&ch_flower->lock);
        return 0;

err:
        return ret;
}

static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
        .nelem_hint = 384,
        .head_offset = offsetof(struct ch_tc_flower_entry, node),
        .key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
        .key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
        .max_size = 524288,
        .min_size = 512,
        .automatic_shrinking = true
};

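/* Set up flower offload state for an adapter: the cookie-keyed rhashtable
 * plus the worker and timer that keep hardware hit counters fresh.
 */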
int cxgb4_init_tc_flower(struct adapter *adap)
{
        int ret;

        if (adap->tc_flower_initialized)
                return -EEXIST;

        adap->flower_ht_params = cxgb4_tc_flower_ht_params;
        ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
        if (ret)
                return ret;

        INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
        timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
        adap->tc_flower_initialized = true;
        return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
        if (!adap->tc_flower_initialized)
                return;

        if (adap->flower_stats_timer.function)
                del_timer_sync(&adap->flower_stats_timer);
        cancel_work_sync(&adap->flower_stats_work);
        rhashtable_destroy(&adap->flower_tbl);
        adap->tc_flower_initialized = false;
}