drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID                        0xffff
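/* Build an 802.1Q TCI from a VLAN ID (bits 11:0) and priority/PCP
 * (bits 15:13, i.e. VLAN_PRIO_SHIFT == 13); e.g. VLAN_TCI(5, 3) == 0x6005.
 */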
#define VLAN_TCI(vid, prio)     ((vid) | ((prio) << VLAN_PRIO_SHIFT))

#define is_vlan_pcp_wildcarded(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
#define is_vlan_pcp_exactmatch(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
#define is_vlan_pcp_zero(vlan_tci)      \
        ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
#define is_vid_exactmatch(vlan_tci_mask)        \
        ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
        struct bnxt *bp;

        /* check if dev belongs to the same switch */
        if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
                netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
                            dev->ifindex);
                return BNXT_FID_INVALID;
        }

        /* Is dev a VF-rep? */
        if (bnxt_dev_is_vf_rep(dev))
                return bnxt_vf_rep_get_fid(dev);

        bp = netdev_priv(dev);
        return bp->pf.fw_fid;
}

static int bnxt_tc_parse_redir(struct bnxt *bp,
                               struct bnxt_tc_actions *actions,
                               const struct flow_action_entry *act)
{
        struct net_device *dev = act->dev;

        if (!dev) {
                netdev_info(bp->dev, "no dev in mirred action");
                return -EINVAL;
        }

        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
        actions->dst_dev = dev;
        return 0;
}

static int bnxt_tc_parse_vlan(struct bnxt *bp,
                              struct bnxt_tc_actions *actions,
                              const struct flow_action_entry *act)
{
        switch (act->id) {
        case FLOW_ACTION_VLAN_POP:
                actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
                break;
        case FLOW_ACTION_VLAN_PUSH:
                actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
                actions->push_vlan_tci = htons(act->vlan.vid);
                actions->push_vlan_tpid = act->vlan.proto;
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
                                    struct bnxt_tc_actions *actions,
                                    const struct flow_action_entry *act)
{
        const struct ip_tunnel_info *tun_info = act->tunnel;
        const struct ip_tunnel_key *tun_key = &tun_info->key;

        if (ip_tunnel_info_af(tun_info) != AF_INET) {
                netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
                return -EOPNOTSUPP;
        }

        actions->tun_encap_key = *tun_key;
        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
        return 0;
}

static int bnxt_tc_parse_actions(struct bnxt *bp,
                                 struct bnxt_tc_actions *actions,
                                 struct flow_action *flow_action)
{
        struct flow_action_entry *act;
        int i, rc;

        if (!flow_action_has_entries(flow_action)) {
                netdev_info(bp->dev, "no actions");
                return -EINVAL;
        }

        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                        actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
                        return 0; /* don't bother with other actions */
                case FLOW_ACTION_REDIRECT:
                        rc = bnxt_tc_parse_redir(bp, actions, act);
                        if (rc)
                                return rc;
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE:
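                        /* Note: only VLAN push/pop are offloaded;
                         * bnxt_tc_parse_vlan() returns -EOPNOTSUPP for
                         * FLOW_ACTION_VLAN_MANGLE via its default case.
                         */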
                        rc = bnxt_tc_parse_vlan(bp, actions, act);
                        if (rc)
                                return rc;
                        break;
                case FLOW_ACTION_TUNNEL_ENCAP:
                        rc = bnxt_tc_parse_tunnel_set(bp, actions, act);
                        if (rc)
                                return rc;
                        break;
                case FLOW_ACTION_TUNNEL_DECAP:
                        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
                        break;
                default:
                        break;
                }
        }

        if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                        /* dst_fid is PF's fid */
                        actions->dst_fid = bp->pf.fw_fid;
                } else {
                        /* find the FID from dst_dev */
                        actions->dst_fid =
                                bnxt_flow_get_dst_fid(bp, actions->dst_dev);
                        if (actions->dst_fid == BNXT_FID_INVALID)
                                return -EINVAL;
                }
        }

        return 0;
}

static int bnxt_tc_parse_flow(struct bnxt *bp,
                              struct flow_cls_offload *tc_flow_cmd,
                              struct bnxt_tc_flow *flow)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(tc_flow_cmd);
        struct flow_dissector *dissector = rule->match.dissector;

        /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
        if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
            (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
                netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                flow->l2_key.ether_type = match.key->n_proto;
                flow->l2_mask.ether_type = match.mask->n_proto;

                if (match.key->n_proto == htons(ETH_P_IP) ||
                    match.key->n_proto == htons(ETH_P_IPV6)) {
                        flow->l4_key.ip_proto = match.key->ip_proto;
                        flow->l4_mask.ip_proto = match.mask->ip_proto;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;

                flow_rule_match_eth_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
                ether_addr_copy(flow->l2_key.dmac, match.key->dst);
                ether_addr_copy(flow->l2_mask.dmac, match.mask->dst);
                ether_addr_copy(flow->l2_key.smac, match.key->src);
                ether_addr_copy(flow->l2_mask.smac, match.mask->src);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);
                flow->l2_key.inner_vlan_tci =
                        cpu_to_be16(VLAN_TCI(match.key->vlan_id,
                                             match.key->vlan_priority));
                flow->l2_mask.inner_vlan_tci =
                        cpu_to_be16((VLAN_TCI(match.mask->vlan_id,
                                              match.mask->vlan_priority)));
                flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
                flow->l2_mask.inner_vlan_tpid = htons(0xffff);
                flow->l2_key.num_vlans = 1;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
                flow->l3_key.ipv4.daddr.s_addr = match.key->dst;
                flow->l3_mask.ipv4.daddr.s_addr = match.mask->dst;
                flow->l3_key.ipv4.saddr.s_addr = match.key->src;
                flow->l3_mask.ipv4.saddr.s_addr = match.mask->src;
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
                flow->l3_key.ipv6.daddr = match.key->dst;
                flow->l3_mask.ipv6.daddr = match.mask->dst;
                flow->l3_key.ipv6.saddr = match.key->src;
                flow->l3_mask.ipv6.saddr = match.mask->src;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
                flow->l4_key.ports.dport = match.key->dst;
                flow->l4_mask.ports.dport = match.mask->dst;
                flow->l4_key.ports.sport = match.key->src;
                flow->l4_mask.ports.sport = match.mask->src;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
                struct flow_match_icmp match;

                flow_rule_match_icmp(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
                flow->l4_key.icmp.type = match.key->type;
                flow->l4_key.icmp.code = match.key->code;
                flow->l4_mask.icmp.type = match.mask->type;
                flow->l4_mask.icmp.code = match.mask->code;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_enc_ipv4_addrs(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
                flow->tun_key.u.ipv4.dst = match.key->dst;
                flow->tun_mask.u.ipv4.dst = match.mask->dst;
                flow->tun_key.u.ipv4.src = match.key->src;
                flow->tun_mask.u.ipv4.src = match.mask->src;
        } else if (flow_rule_match_key(rule,
                                      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
                flow->tun_key.tun_id = key32_to_tunnel_id(match.key->keyid);
                flow->tun_mask.tun_id = key32_to_tunnel_id(match.mask->keyid);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_enc_ports(rule, &match);
                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
                flow->tun_key.tp_dst = match.key->dst;
                flow->tun_mask.tp_dst = match.mask->dst;
                flow->tun_key.tp_src = match.key->src;
                flow->tun_mask.tp_src = match.mask->src;
        }

        return bnxt_tc_parse_actions(bp, &flow->actions, &rule->action);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
                                   struct bnxt_tc_flow_node *flow_node)
{
        struct hwrm_cfa_flow_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
        if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
                req.ext_flow_handle = flow_node->ext_flow_handle;
        else
                req.flow_handle = flow_node->flow_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        if (rc)
                rc = -EIO;
        return rc;
}

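/* Sum of per-32-bit-word prefix lengths; this is only meaningful for a
 * contiguous (prefix-style) IPv6 mask.
 */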
static int ipv6_mask_len(struct in6_addr *mask)
{
        int mask_len = 0, i;

        for (i = 0; i < 4; i++)
                mask_len += inet_mask_len(mask->s6_addr32[i]);

        return mask_len;
}

static bool is_wildcard(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++) {
                if (p[i] != 0)
                        return false;
        }
        return true;
}

static bool is_exactmatch(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0xff)
                        return false;

        return true;
}

static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
                                __be16 vlan_tci)
{
        /* VLAN priority must be either exactly zero or fully wildcarded and
         * VLAN id must be exact match.
         */
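        /* e.g. an exact-match mask (htons(0xffff)) with zero PCP bits in the
         * TCI, or a VID-only mask (htons(0x0fff)), is offloadable.
         */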
        if (is_vid_exactmatch(vlan_tci_mask) &&
            ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
              is_vlan_pcp_zero(vlan_tci)) ||
             is_vlan_pcp_wildcarded(vlan_tci_mask)))
                return true;

        return false;
}

static bool bits_set(void *key, int len)
{
        const u8 *p = key;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0)
                        return true;

        return false;
}

static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    __le16 ref_flow_handle,
                                    __le32 tunnel_handle,
                                    struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_actions *actions = &flow->actions;
        struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
        struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
        struct hwrm_cfa_flow_alloc_input req = { 0 };
        struct hwrm_cfa_flow_alloc_output *resp;
        u16 flow_flags = 0, action_flags = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

        req.src_fid = cpu_to_le16(flow->src_fid);
        req.ref_flow_handle = ref_flow_handle;

        if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                req.tunnel_handle = tunnel_handle;
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
        }

        req.ethertype = flow->l2_key.ether_type;
        req.ip_proto = flow->l4_key.ip_proto;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
                memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
                memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
        }

        if (flow->l2_key.num_vlans > 0) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
                /* FW expects the inner_vlan_tci value to be set
                 * in outer_vlan_tci when num_vlans is 1 (which is
                 * always the case in TC).
                 */
                req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
        }

        /* If all IP and L4 fields are wildcarded then this is an L2 flow */
        if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
            is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
        } else {
                flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

                if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
                        req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
                        req.ip_dst_mask_len =
                                inet_mask_len(l3_mask->ipv4.daddr.s_addr);
                        req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
                        req.ip_src_mask_len =
                                inet_mask_len(l3_mask->ipv4.saddr.s_addr);
                } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
                        memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
                               sizeof(req.ip_dst));
                        req.ip_dst_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.daddr);
                        memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
                               sizeof(req.ip_src));
                        req.ip_src_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.saddr);
                }
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
                req.l4_src_port = flow->l4_key.ports.sport;
                req.l4_src_port_mask = flow->l4_mask.ports.sport;
                req.l4_dst_port = flow->l4_key.ports.dport;
                req.l4_dst_port_mask = flow->l4_mask.ports.dport;
        } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
                /* l4 ports serve as type/code when ip_proto is ICMP */
                req.l4_src_port = htons(flow->l4_key.icmp.type);
                req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
                req.l4_dst_port = htons(flow->l4_key.icmp.code);
                req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
        }
        req.flags = cpu_to_le16(flow_flags);

        if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
        } else {
                if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                        action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
                        req.dst_fid = cpu_to_le16(actions->dst_fid);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
                        req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        /* Rewrite config with tpid = 0 implies vlan pop */
                        req.l2_rewrite_vlan_tpid = 0;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
        }
        req.action_flags = cpu_to_le16(action_flags);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                resp = bnxt_get_hwrm_resp_addr(bp, &req);
                /* CFA_FLOW_ALLOC response interpretation:
                 *                  fw with          fw with
                 *                  16-bit           64-bit
                 *                  flow handle      flow handle
                 *                  ===========      ===========
                 * flow_handle      flow handle      flow context id
                 * ext_flow_handle  INVALID          flow handle
                 * flow_id          INVALID          flow counter id
                 */
                flow_node->flow_handle = resp->flow_handle;
                if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
                        flow_node->ext_flow_handle = resp->ext_flow_handle;
                        flow_node->flow_id = resp->flow_id;
                }
        }
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
                rc = -ENOSPC;
        else if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
                                       struct bnxt_tc_flow *flow,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 ref_decap_handle,
                                       __le32 *decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
        struct hwrm_cfa_decap_filter_alloc_output *resp;
        struct ip_tunnel_key *tun_key = &flow->tun_key;
        u32 enables = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

        req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
                   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
        req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
                /* tunnel_id is wrongly defined in hsi defn. as __le32 */
                req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
                req.t_ivlan_vid = l2_info->inner_vlan_tci;
        }

        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
        req.ethertype = htons(ETH_P_IP);

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
                req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
                req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
                req.src_ipaddr[0] = tun_key->u.ipv4.src;
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
                req.dst_port = tun_key->tp_dst;
        }

        /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
         * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
         */
        req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
        req.enables = cpu_to_le32(enables);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                resp = bnxt_get_hwrm_resp_addr(bp, &req);
                *decap_filter_handle = resp->decap_filter_id;
        } else {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
                                      __le32 decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
        req.decap_filter_id = decap_filter_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
                                       struct ip_tunnel_key *encap_key,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 *encap_record_handle)
{
        struct hwrm_cfa_encap_record_alloc_input req = { 0 };
        struct hwrm_cfa_encap_record_alloc_output *resp;
        struct hwrm_cfa_encap_data_vxlan *encap =
                        (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
        struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
                                (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

        req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

        ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
        ether_addr_copy(encap->src_mac_addr, l2_info->smac);
        if (l2_info->num_vlans) {
                encap->num_vlan_tags = l2_info->num_vlans;
                encap->ovlan_tci = l2_info->inner_vlan_tci;
                encap->ovlan_tpid = l2_info->inner_vlan_tpid;
        }

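        /* IP version 4, header length 5 x 32-bit words (20 bytes, no options) */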
        encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
        encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
        encap_ipv4->ttl = encap_key->ttl;

        encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
        encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
        encap_ipv4->protocol = IPPROTO_UDP;

        encap->dst_port = encap_key->tp_dst;
        encap->vni = tunnel_id_to_key32(encap_key->tun_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
                resp = bnxt_get_hwrm_resp_addr(bp, &req);
                *encap_record_handle = resp->encap_record_id;
        } else {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        }
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
                                      __le32 encap_record_handle)
{
        struct hwrm_cfa_encap_record_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
        req.encap_record_id = encap_record_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc)
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);

        if (rc)
                rc = -EIO;
        return rc;
}

static int bnxt_tc_put_l2_node(struct bnxt *bp,
                               struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the L2 shared flow list */
        list_del(&flow_node->l2_list_node);
        if (--l2_node->refcount == 0) {
                rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
                                            tc_info->l2_ht_params);
                if (rc)
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_remove_fast: %d",
                                   __func__, rc);
                kfree_rcu(l2_node, rcu);
        }
        return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
                    struct rhashtable_params ht_params,
                    struct bnxt_tc_l2_key *l2_key)
{
        struct bnxt_tc_l2_node *l2_node;
        int rc;

        l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
        if (!l2_node) {
                l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
                if (!l2_node) {
                        rc = -ENOMEM;
                        return NULL;
                }

                l2_node->key = *l2_key;
                rc = rhashtable_insert_fast(l2_table, &l2_node->node,
                                            ht_params);
                if (rc) {
                        kfree_rcu(l2_node, rcu);
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_insert_fast: %d",
                                   __func__, rc);
                        return NULL;
                }
                INIT_LIST_HEAD(&l2_node->common_l2_flows);
        }
        return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                            struct bnxt_tc_flow_node *flow_node,
                            __le16 *ref_flow_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *l2_node;

        l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
                                      tc_info->l2_ht_params,
                                      &flow->l2_key);
        if (!l2_node)
                return -1;

        /* If any other flow is using this l2_node, use its flow_handle
         * as the ref_flow_handle
         */
        if (l2_node->refcount > 0) {
                ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
                                                 struct bnxt_tc_flow_node,
                                                 l2_list_node);
                *ref_flow_handle = ref_flow_node->flow_handle;
        } else {
                *ref_flow_handle = cpu_to_le16(0xffff);
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching l2 key can use the flow_handle of this flow
         * as their ref_flow_handle
         */
        flow_node->l2_node = l2_node;
        list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
        l2_node->refcount++;
        return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
        /* If L4 ports are specified then ip_proto must be TCP or UDP */
        if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
            (flow->l4_key.ip_proto != IPPROTO_TCP &&
             flow->l4_key.ip_proto != IPPROTO_UDP)) {
                netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
                            flow->l4_key.ip_proto);
                return false;
        }

        /* Currently source/dest MAC cannot be partial wildcard */
        if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
            !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
                return false;
        }
        if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
            !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
                return false;
        }

        /* Currently VLAN fields cannot be partial wildcard */
        if (bits_set(&flow->l2_key.inner_vlan_tci,
                     sizeof(flow->l2_key.inner_vlan_tci)) &&
            !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
                                 flow->l2_key.inner_vlan_tci)) {
                netdev_info(bp->dev, "Unsupported VLAN TCI\n");
                return false;
        }
        if (bits_set(&flow->l2_key.inner_vlan_tpid,
                     sizeof(flow->l2_key.inner_vlan_tpid)) &&
            !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
                           sizeof(flow->l2_mask.inner_vlan_tpid))) {
                netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
                return false;
        }

        /* Currently Ethertype must be set */
        if (!is_exactmatch(&flow->l2_mask.ether_type,
                           sizeof(flow->l2_mask.ether_type))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
                return false;
        }

        return true;
}

/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
                                   struct rhashtable *tunnel_table,
                                   struct rhashtable_params *ht_params,
                                   struct bnxt_tc_tunnel_node *tunnel_node)
{
        int rc;

        if (--tunnel_node->refcount == 0) {
                rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
                                            *ht_params);
                if (rc) {
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                        rc = -1;
                }
                kfree_rcu(tunnel_node, rcu);
                return rc;
        } else {
                return tunnel_node->refcount;
        }
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
                        struct rhashtable_params *ht_params,
                        struct ip_tunnel_key *tun_key)
{
        struct bnxt_tc_tunnel_node *tunnel_node;
        int rc;

        tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
        if (!tunnel_node) {
                tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
                if (!tunnel_node) {
                        rc = -ENOMEM;
                        goto err;
                }

                tunnel_node->key = *tun_key;
                tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
                rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
                                            *ht_params);
                if (rc) {
                        kfree_rcu(tunnel_node, rcu);
                        goto err;
                }
        }
        tunnel_node->refcount++;
        return tunnel_node;
err:
        netdev_info(bp->dev, "error rc=%d", rc);
        return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
                                        struct bnxt_tc_flow *flow,
                                        struct bnxt_tc_l2_key *l2_key,
                                        struct bnxt_tc_flow_node *flow_node,
                                        __le32 *ref_decap_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *decap_l2_node;

        decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
                                            tc_info->decap_l2_ht_params,
                                            l2_key);
        if (!decap_l2_node)
                return -1;

        /* If any other flow is using this decap_l2_node, use its decap_handle
         * as the ref_decap_handle
         */
        if (decap_l2_node->refcount > 0) {
                ref_flow_node =
                        list_first_entry(&decap_l2_node->common_l2_flows,
                                         struct bnxt_tc_flow_node,
                                         decap_l2_list_node);
                *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
        } else {
                *ref_decap_handle = INVALID_TUNNEL_HANDLE;
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching decap l2 key can use the decap_filter_handle of
         * this flow as their ref_decap_handle
         */
        flow_node->decap_l2_node = decap_l2_node;
        list_add(&flow_node->decap_l2_list_node,
                 &decap_l2_node->common_l2_flows);
        decap_l2_node->refcount++;
        return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
                                      struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the decap L2 shared flow list */
        list_del(&flow_node->decap_l2_list_node);
        if (--decap_l2_node->refcount == 0) {
                rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
                                            &decap_l2_node->node,
                                            tc_info->decap_l2_ht_params);
                if (rc)
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                kfree_rcu(decap_l2_node, rcu);
        }
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow_node *flow_node)
{
        __le32 decap_handle = flow_node->decap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        if (flow_node->decap_l2_node)
                bnxt_tc_put_decap_l2_node(bp, flow_node);

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                     &tc_info->decap_ht_params,
                                     flow_node->decap_node);
        if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_decap_filter_free(bp, decap_handle);
}

static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
                                       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
        struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
        struct rtable *rt;
        int rc;

        flow.flowi4_proto = IPPROTO_UDP;
        flow.fl4_dport = tun_key->tp_dst;
        flow.daddr = tun_key->u.ipv4.dst;

        rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
        if (IS_ERR(rt)) {
                netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
                return -EOPNOTSUPP;
        }

        /* The route must either point to the real_dst_dev or a dst_dev that
         * uses the real_dst_dev.
         */
        dst_dev = rt->dst.dev;
        if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
                struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

                if (vlan->real_dev != real_dst_dev) {
                        netdev_info(bp->dev,
                                    "dst_dev(%s) doesn't use PF-if(%s)",
                                    netdev_name(dst_dev),
                                    netdev_name(real_dst_dev));
                        rc = -EOPNOTSUPP;
                        goto put_rt;
                }
                l2_info->inner_vlan_tci = htons(vlan->vlan_id);
                l2_info->inner_vlan_tpid = vlan->vlan_proto;
                l2_info->num_vlans = 1;
#endif
        } else if (dst_dev != real_dst_dev) {
                netdev_info(bp->dev,
                            "dst_dev(%s) for %pI4b is not PF-if(%s)",
                            netdev_name(dst_dev), &flow.daddr,
                            netdev_name(real_dst_dev));
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
        if (!nbr) {
                netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
                            &flow.daddr);
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        tun_key->u.ipv4.src = flow.saddr;
        tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
        neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
        ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
        neigh_release(nbr);
        ip_rt_put(rt);

        return 0;
put_rt:
        ip_rt_put(rt);
        return rc;
#else
        return -EOPNOTSUPP;
#endif
}

static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *decap_filter_handle)
{
        struct ip_tunnel_key *decap_key = &flow->tun_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_l2_key l2_info = { {0} };
        struct bnxt_tc_tunnel_node *decap_node;
        struct ip_tunnel_key tun_key = { 0 };
        struct bnxt_tc_l2_key *decap_l2_info;
        __le32 ref_decap_handle;
        int rc;

        /* Check if there's another flow using the same tunnel decap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields. Ignore src_port in the tunnel_key,
         * since it is not required for decap filters.
         */
        decap_key->tp_src = 0;
        decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
                                             &tc_info->decap_ht_params,
                                             decap_key);
        if (!decap_node)
                return -ENOMEM;

        flow_node->decap_node = decap_node;

        if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        /* Resolve the L2 fields for tunnel decap:
         * resolve the route for the remote VTEP (the saddr of the decap key)
         * and find its next-hop MAC address.
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
        rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;

        decap_l2_info = &decap_node->l2_info;
        /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
                decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
        }
        flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

        /* For getting a decap_filter_handle we first need to check if
         * there are any other decap flows that share the same tunnel L2
         * key and if so, pass that flow's decap_filter_handle as the
         * ref_decap_handle for this flow.
         */
        rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
                                          &ref_decap_handle);
        if (rc)
                goto put_decap;

        /* Issue the hwrm cmd to allocate a decap filter handle */
        rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
                                         ref_decap_handle,
                                         &decap_node->tunnel_handle);
        if (rc)
                goto put_decap_l2;

done:
        *decap_filter_handle = decap_node->tunnel_handle;
        return 0;

put_decap_l2:
        bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                &tc_info->decap_ht_params,
                                flow_node->decap_node);
        return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
                                     struct bnxt_tc_tunnel_node *encap_node)
{
        __le32 encap_handle = encap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                     &tc_info->encap_ht_params, encap_node);
        if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *encap_handle)
{
        struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_tunnel_node *encap_node;
        int rc;

        /* Check if there's another flow using the same tunnel encap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields
         */
        encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
                                             &tc_info->encap_ht_params,
                                             encap_key);
        if (!encap_node)
                return -ENOMEM;

        flow_node->encap_node = encap_node;

        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;

        /* Allocate a new tunnel encap record */
        rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
                                         &encap_node->tunnel_handle);
        if (rc)
                goto put_encap;

done:
        *encap_handle = encap_node->tunnel_handle;
        return 0;

put_encap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                &tc_info->encap_ht_params, encap_node);
        return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
                                      struct bnxt_tc_flow *flow,
                                      struct bnxt_tc_flow_node *flow_node)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                bnxt_tc_put_decap_handle(bp, flow_node);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow *flow,
                                     struct bnxt_tc_flow_node *flow_node,
                                     __le32 *tunnel_handle)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                return bnxt_tc_get_decap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                return bnxt_tc_get_encap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else
                return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
                              struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* send HWRM cmd to free the flow-id */
        bnxt_hwrm_cfa_flow_free(bp, flow_node);

        mutex_lock(&tc_info->lock);

        /* release references to any tunnel encap/decap nodes */
        bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

        /* release reference to l2 node */
        bnxt_tc_put_l2_node(bp, flow_node);

        mutex_unlock(&tc_info->lock);

        rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
                                    tc_info->flow_ht_params);
        if (rc)
                netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
                           __func__, rc);

        kfree_rcu(flow_node, rcu);
        return 0;
}

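/* A flow whose src_fid is the PF's own fid is treated as RX (coming from
 * the wire); flows sourced from a VF are treated as TX.
 */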
static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                 u16 src_fid)
{
        flow->dir = (bp->pf.fw_fid == src_fid) ? BNXT_DIR_RX : BNXT_DIR_TX;
}

static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                u16 src_fid)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                flow->src_fid = bp->pf.fw_fid;
        else
                flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
                            struct flow_cls_offload *tc_flow_cmd)
{
        struct bnxt_tc_flow_node *new_node, *old_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow *flow;
        __le32 tunnel_handle = 0;
        __le16 ref_flow_handle;
        int rc;

        /* allocate memory for the new flow and its node */
        new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
        if (!new_node) {
                rc = -ENOMEM;
                goto done;
        }
        new_node->cookie = tc_flow_cmd->cookie;
        flow = &new_node->flow;

        rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
        if (rc)
                goto free_node;

        bnxt_tc_set_src_fid(bp, flow, src_fid);

        if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE)
                bnxt_tc_set_flow_dir(bp, flow, src_fid);

        if (!bnxt_tc_can_offload(bp, flow)) {
                rc = -EOPNOTSUPP;
                goto free_node;
        }

        /* If a flow exists with the same cookie, delete it */
        old_node = rhashtable_lookup_fast(&tc_info->flow_table,
                                          &tc_flow_cmd->cookie,
                                          tc_info->flow_ht_params);
        if (old_node)
                __bnxt_tc_del_flow(bp, old_node);

        /* Check if the L2 part of the flow has been offloaded already.
         * If so, bump up its refcnt and get its reference handle.
         */
1307         mutex_lock(&tc_info->lock);
1308         rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
1309         if (rc)
1310                 goto unlock;
1311
1312         /* If the flow involves tunnel encap/decap, get tunnel_handle */
1313         rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
1314         if (rc)
1315                 goto put_l2;
1316
1317         /* send HWRM cmd to alloc the flow */
1318         rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
1319                                       tunnel_handle, new_node);
1320         if (rc)
1321                 goto put_tunnel;
1322
1323         flow->lastused = jiffies;
1324         spin_lock_init(&flow->stats_lock);
1325         /* add new flow to flow-table */
1326         rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
1327                                     tc_info->flow_ht_params);
1328         if (rc)
1329                 goto hwrm_flow_free;
1330
1331         mutex_unlock(&tc_info->lock);
1332         return 0;
1333
1334 hwrm_flow_free:
1335         bnxt_hwrm_cfa_flow_free(bp, new_node);
1336 put_tunnel:
1337         bnxt_tc_put_tunnel_handle(bp, flow, new_node);
1338 put_l2:
1339         bnxt_tc_put_l2_node(bp, new_node);
1340 unlock:
1341         mutex_unlock(&tc_info->lock);
1342 free_node:
1343         kfree_rcu(new_node, rcu);
1344 done:
1345         netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
1346                    __func__, tc_flow_cmd->cookie, rc);
1347         return rc;
1348 }
1349
1350 static int bnxt_tc_del_flow(struct bnxt *bp,
1351                             struct flow_cls_offload *tc_flow_cmd)
1352 {
1353         struct bnxt_tc_info *tc_info = bp->tc_info;
1354         struct bnxt_tc_flow_node *flow_node;
1355
1356         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1357                                            &tc_flow_cmd->cookie,
1358                                            tc_info->flow_ht_params);
1359         if (!flow_node)
1360                 return -EINVAL;
1361
1362         return __bnxt_tc_del_flow(bp, flow_node);
1363 }
1364
1365 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
1366                                   struct flow_cls_offload *tc_flow_cmd)
1367 {
1368         struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
1369         struct bnxt_tc_info *tc_info = bp->tc_info;
1370         struct bnxt_tc_flow_node *flow_node;
1371         struct bnxt_tc_flow *flow;
1372         unsigned long lastused;
1373
1374         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1375                                            &tc_flow_cmd->cookie,
1376                                            tc_info->flow_ht_params);
1377         if (!flow_node)
1378                 return -EINVAL;
1379
1380         flow = &flow_node->flow;
1381         curr_stats = &flow->stats;
1382         prev_stats = &flow->prev_stats;
1383
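        /* Report only the delta since the previous query and snapshot
         * the current counters, so that each FLOW_CLS_STATS request
         * returns just the new traffic.
         */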
1384         spin_lock(&flow->stats_lock);
1385         stats.packets = curr_stats->packets - prev_stats->packets;
1386         stats.bytes = curr_stats->bytes - prev_stats->bytes;
1387         *prev_stats = *curr_stats;
1388         lastused = flow->lastused;
1389         spin_unlock(&flow->stats_lock);
1390
1391         flow_stats_update(&tc_flow_cmd->stats, stats.bytes, stats.packets,
1392                           lastused);
1393         return 0;
1394 }
1395
1396 static void bnxt_fill_cfa_stats_req(struct bnxt *bp,
1397                                     struct bnxt_tc_flow_node *flow_node,
1398                                     __le16 *flow_handle, __le32 *flow_id)
1399 {
1400         u16 handle;
1401
1402         if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) {
1403                 *flow_id = flow_node->flow_id;
1404
1405                 /* If flow_id is used to fetch flow stats then:
1406                  * 1. lower 12 bits of flow_handle must be set to all 1s.
1407                  * 2. 15th bit of flow_handle must specify the flow
1408                  *    direction (TX/RX).
1409                  */
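                /* For example (illustrative values, assuming MAX_MASK
                 * covers the low 12 bits, 0xfff, and DIR_RX is bit 15,
                 * 0x8000): RX yields handle 0x8fff, TX yields 0x0fff.
                 */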
1410                 if (flow_node->flow.dir == BNXT_DIR_RX)
1411                         handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX |
1412                                  CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1413                 else
1414                         handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK;
1415
1416                 *flow_handle = cpu_to_le16(handle);
1417         } else {
1418                 *flow_handle = flow_node->flow_handle;
1419         }
1420 }
1421
1422 static int
1423 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
1424                              struct bnxt_tc_stats_batch stats_batch[])
1425 {
1426         struct hwrm_cfa_flow_stats_input req = { 0 };
1427         struct hwrm_cfa_flow_stats_output *resp;
1428         __le16 *req_flow_handles = &req.flow_handle_0;
1429         __le32 *req_flow_ids = &req.flow_id_0;
1430         int rc, i;
1431
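        /* flow_handle_0 and flow_id_0 are the first of num_flows
         * consecutive fields in the HWRM request, so they are indexed
         * as arrays below (this relies on the request struct laying
         * the per-flow fields out contiguously).
         */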
1432         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
1433         req.num_flows = cpu_to_le16(num_flows);
1434         for (i = 0; i < num_flows; i++) {
1435                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1436
1437                 bnxt_fill_cfa_stats_req(bp, flow_node,
1438                                         &req_flow_handles[i], &req_flow_ids[i]);
1439         }
1440
1441         mutex_lock(&bp->hwrm_cmd_lock);
1442         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1443         if (!rc) {
1444                 __le64 *resp_packets;
1445                 __le64 *resp_bytes;
1446
1447                 resp = bnxt_get_hwrm_resp_addr(bp, &req);
1448                 resp_packets = &resp->packet_0;
1449                 resp_bytes = &resp->byte_0;
1450
1451                 for (i = 0; i < num_flows; i++) {
1452                         stats_batch[i].hw_stats.packets =
1453                                                 le64_to_cpu(resp_packets[i]);
1454                         stats_batch[i].hw_stats.bytes =
1455                                                 le64_to_cpu(resp_bytes[i]);
1456                 }
1457         } else {
1458                 netdev_info(bp->dev, "error rc=%d\n", rc);
1459         }
1460         mutex_unlock(&bp->hwrm_cmd_lock);
1461
1462         if (rc)
1463                 rc = -EIO;
1464         return rc;
1465 }
1466
1467 /* Add val to accum while handling a possible wraparound
1468  * of val. Even though val is of type u64, its actual width
1469  * is denoted by mask and will wrap-around beyond that width.
1470  */
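/* Worked example (illustrative, 4-bit counter, mask = 0xf):
 * accum = 14 (0xe), new HW reading val = 2: val < low_bits(accum),
 * so the counter wrapped; accum = high_bits(14) + 2 + (0xf + 1) = 18,
 * i.e. 14, plus 2 to reach the wrap, plus 2 after it.
 */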
1471 static void accumulate_val(u64 *accum, u64 val, u64 mask)
1472 {
1473 #define low_bits(x, mask)               ((x) & (mask))
1474 #define high_bits(x, mask)              ((x) & ~(mask))
1475         bool wrapped = val < low_bits(*accum, mask);
1476
1477         *accum = high_bits(*accum, mask) + val;
1478         if (wrapped)
1479                 *accum += (mask + 1);
1480 }
1481
1482 /* The HW counters' width is much less than 64 bits.
1483  * Handle a possible wrap-around while updating the stat counters.
1484  */
1485 static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
1486                                   struct bnxt_tc_flow_stats *acc_stats,
1487                                   struct bnxt_tc_flow_stats *hw_stats)
1488 {
1489         accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
1490         accumulate_val(&acc_stats->packets, hw_stats->packets,
1491                        tc_info->packets_mask);
1492 }
1493
1494 static int
1495 bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
1496                                 struct bnxt_tc_stats_batch stats_batch[])
1497 {
1498         struct bnxt_tc_info *tc_info = bp->tc_info;
1499         int rc, i;
1500
1501         rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
1502         if (rc)
1503                 return rc;
1504
1505         for (i = 0; i < num_flows; i++) {
1506                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1507                 struct bnxt_tc_flow *flow = &flow_node->flow;
1508
1509                 spin_lock(&flow->stats_lock);
1510                 bnxt_flow_stats_accum(tc_info, &flow->stats,
1511                                       &stats_batch[i].hw_stats);
1512                 if (flow->stats.packets != flow->prev_stats.packets)
1513                         flow->lastused = jiffies;
1514                 spin_unlock(&flow->stats_lock);
1515         }
1516
1517         return 0;
1518 }
1519
1520 static int
1521 bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
1522                               struct bnxt_tc_stats_batch stats_batch[],
1523                               int *num_flows)
1524 {
1525         struct bnxt_tc_info *tc_info = bp->tc_info;
1526         struct rhashtable_iter *iter = &tc_info->iter;
1527         void *flow_node;
1528         int rc, i;
1529
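        /* rhashtable_walk_next() returns -EAGAIN when the table is
         * resized during the walk; the partially filled batch is then
         * dropped and the walk continues (entries may be delivered
         * twice across a resize, which is acceptable for stats).
         */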
1530         rhashtable_walk_start(iter);
1531
1532         rc = 0;
1533         for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
1534                 flow_node = rhashtable_walk_next(iter);
1535                 if (IS_ERR(flow_node)) {
1536                         i = 0;
1537                         if (PTR_ERR(flow_node) == -EAGAIN) {
1538                                 continue;
1539                         } else {
1540                                 rc = PTR_ERR(flow_node);
1541                                 goto done;
1542                         }
1543                 }
1544
1545                 /* No more flows */
1546                 if (!flow_node)
1547                         goto done;
1548
1549                 stats_batch[i].flow_node = flow_node;
1550         }
1551 done:
1552         rhashtable_walk_stop(iter);
1553         *num_flows = i;
1554         return rc;
1555 }
1556
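/* Poll the HW counters for all offloaded flows in batches of
 * BNXT_FLOW_STATS_BATCH_MAX and fold them into the per-flow
 * accumulators (presumably invoked periodically from the driver's
 * workqueue, as the _work suffix suggests).
 */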
1557 void bnxt_tc_flow_stats_work(struct bnxt *bp)
1558 {
1559         struct bnxt_tc_info *tc_info = bp->tc_info;
1560         int num_flows, rc;
1561
1562         num_flows = atomic_read(&tc_info->flow_table.nelems);
1563         if (!num_flows)
1564                 return;
1565
1566         rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
1567
1568         for (;;) {
1569                 rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
1570                                                    &num_flows);
1571                 if (rc) {
1572                         if (rc == -EAGAIN)
1573                                 continue;
1574                         break;
1575                 }
1576
1577                 if (!num_flows)
1578                         break;
1579
1580                 bnxt_tc_flow_stats_batch_update(bp, num_flows,
1581                                                 tc_info->stats_batch);
1582         }
1583
1584         rhashtable_walk_exit(&tc_info->iter);
1585 }
1586
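/* Driver entry point for TC flower offload. An illustrative command
 * (not from this file) that would land here via FLOW_CLS_REPLACE:
 *   tc filter add dev <uplink-or-vf-rep> ingress flower ... action ...
 * FLOW_CLS_DESTROY removes the flow and FLOW_CLS_STATS reports its
 * HW counters.
 */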
1587 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
1588                          struct flow_cls_offload *cls_flower)
1589 {
1590         switch (cls_flower->command) {
1591         case FLOW_CLS_REPLACE:
1592                 return bnxt_tc_add_flow(bp, src_fid, cls_flower);
1593         case FLOW_CLS_DESTROY:
1594                 return bnxt_tc_del_flow(bp, cls_flower);
1595         case FLOW_CLS_STATS:
1596                 return bnxt_tc_get_flow_stats(bp, cls_flower);
1597         default:
1598                 return -EOPNOTSUPP;
1599         }
1600 }
1601
1602 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
1603         .head_offset = offsetof(struct bnxt_tc_flow_node, node),
1604         .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
1605         .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
1606         .automatic_shrinking = true
1607 };
1608
1609 static const struct rhashtable_params bnxt_tc_l2_ht_params = {
1610         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1611         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1612         .key_len = BNXT_TC_L2_KEY_LEN,
1613         .automatic_shrinking = true
1614 };
1615
1616 static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
1617         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1618         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1619         .key_len = BNXT_TC_L2_KEY_LEN,
1620         .automatic_shrinking = true
1621 };
1622
1623 static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
1624         .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
1625         .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
1626         .key_len = sizeof(struct ip_tunnel_key),
1627         .automatic_shrinking = true
1628 };
1629
1630 /* convert counter width in bits to a mask */
1631 #define mask(width)             ((u64)~0 >> (64 - (width)))
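/* e.g. mask(36) == 0xfffffffff: an all-ones u64 shifted right by
 * (64 - 36) leaves only the low 36 bits set.
 */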
1632
1633 int bnxt_init_tc(struct bnxt *bp)
1634 {
1635         struct bnxt_tc_info *tc_info;
1636         int rc;
1637
1638         if (bp->hwrm_spec_code < 0x10803) {
1639                 netdev_warn(bp->dev,
1640                             "Firmware does not support TC flower offload.\n");
1641                 return -EOPNOTSUPP;
1642         }
1643
1644         tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
1645         if (!tc_info)
1646                 return -ENOMEM;
1647         mutex_init(&tc_info->lock);
1648
1649         /* Counter widths are programmed by FW */
1650         tc_info->bytes_mask = mask(36);
1651         tc_info->packets_mask = mask(28);
1652
1653         tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
1654         rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
1655         if (rc)
1656                 goto free_tc_info;
1657
1658         tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
1659         rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
1660         if (rc)
1661                 goto destroy_flow_table;
1662
1663         tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
1664         rc = rhashtable_init(&tc_info->decap_l2_table,
1665                              &tc_info->decap_l2_ht_params);
1666         if (rc)
1667                 goto destroy_l2_table;
1668
1669         tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
1670         rc = rhashtable_init(&tc_info->decap_table,
1671                              &tc_info->decap_ht_params);
1672         if (rc)
1673                 goto destroy_decap_l2_table;
1674
1675         tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
1676         rc = rhashtable_init(&tc_info->encap_table,
1677                              &tc_info->encap_ht_params);
1678         if (rc)
1679                 goto destroy_decap_table;
1680
1681         tc_info->enabled = true;
1682         bp->dev->hw_features |= NETIF_F_HW_TC;
1683         bp->dev->features |= NETIF_F_HW_TC;
1684         bp->tc_info = tc_info;
1685         return 0;
1686
1687 destroy_decap_table:
1688         rhashtable_destroy(&tc_info->decap_table);
1689 destroy_decap_l2_table:
1690         rhashtable_destroy(&tc_info->decap_l2_table);
1691 destroy_l2_table:
1692         rhashtable_destroy(&tc_info->l2_table);
1693 destroy_flow_table:
1694         rhashtable_destroy(&tc_info->flow_table);
1695 free_tc_info:
1696         kfree(tc_info);
1697         return rc;
1698 }
1699
1700 void bnxt_shutdown_tc(struct bnxt *bp)
1701 {
1702         struct bnxt_tc_info *tc_info = bp->tc_info;
1703
1704         if (!bnxt_tc_flower_enabled(bp))
1705                 return;
1706
1707         rhashtable_destroy(&tc_info->flow_table);
1708         rhashtable_destroy(&tc_info->l2_table);
1709         rhashtable_destroy(&tc_info->decap_l2_table);
1710         rhashtable_destroy(&tc_info->decap_table);
1711         rhashtable_destroy(&tc_info->encap_table);
1712         kfree(tc_info);
1713         bp->tc_info = NULL;
1714 }