/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID                        0xffff
#define VLAN_TCI(vid, prio)     ((vid) | ((prio) << VLAN_PRIO_SHIFT))

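/* An 802.1Q TCI carries the 3-bit priority (PCP) in the bits above
 * VLAN_PRIO_SHIFT and the 12-bit VLAN id in the low bits; the helpers
 * below test the PCP and VID portions of a TCI value/mask independently.
 */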
#define is_vlan_pcp_wildcarded(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
#define is_vlan_pcp_exactmatch(vlan_tci_mask)   \
        ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
#define is_vlan_pcp_zero(vlan_tci)      \
        ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
#define is_vid_exactmatch(vlan_tci_mask)        \
        ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
        struct bnxt *bp;

        /* check if dev belongs to the same switch */
        if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
                netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
                            dev->ifindex);
                return BNXT_FID_INVALID;
        }

        /* Is dev a VF-rep? */
        if (bnxt_dev_is_vf_rep(dev))
                return bnxt_vf_rep_get_fid(dev);

        bp = netdev_priv(dev);
        return bp->pf.fw_fid;
}

static int bnxt_tc_parse_redir(struct bnxt *bp,
                               struct bnxt_tc_actions *actions,
                               const struct tc_action *tc_act)
{
        struct net_device *dev = tcf_mirred_dev(tc_act);

        if (!dev) {
                netdev_info(bp->dev, "no dev in mirred action");
                return -EINVAL;
        }

        actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
        actions->dst_dev = dev;
        return 0;
}

static int bnxt_tc_parse_vlan(struct bnxt *bp,
                              struct bnxt_tc_actions *actions,
                              const struct tc_action *tc_act)
{
        switch (tcf_vlan_action(tc_act)) {
        case TCA_VLAN_ACT_POP:
                actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
                break;
        case TCA_VLAN_ACT_PUSH:
                actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
                actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
                actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
                break;
        default:
                return -EOPNOTSUPP;
        }
        return 0;
}

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
                                    struct bnxt_tc_actions *actions,
                                    const struct tc_action *tc_act)
{
        struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
        struct ip_tunnel_key *tun_key = &tun_info->key;

        if (ip_tunnel_info_af(tun_info) != AF_INET) {
                netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
                return -EOPNOTSUPP;
        }

        actions->tun_encap_key = *tun_key;
        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
        return 0;
}

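/* Walk the TC actions of a flower filter and translate them into
 * BNXT_TC_ACTION_FLAG_* bits. As an illustrative example (the device
 * names here are hypothetical), a filter added with:
 *
 *   tc filter add dev pf0vf0rep ingress protocol ip flower \
 *       ip_proto tcp dst_port 80 \
 *       action vlan pop \
 *       action mirred egress redirect dev pf0
 *
 * would set BNXT_TC_ACTION_FLAG_POP_VLAN and BNXT_TC_ACTION_FLAG_FWD here.
 */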
static int bnxt_tc_parse_actions(struct bnxt *bp,
                                 struct bnxt_tc_actions *actions,
                                 struct tcf_exts *tc_exts)
{
        const struct tc_action *tc_act;
        int i, rc;

        if (!tcf_exts_has_actions(tc_exts)) {
                netdev_info(bp->dev, "no actions");
                return -EINVAL;
        }

        tcf_exts_for_each_action(i, tc_act, tc_exts) {
                /* Drop action */
                if (is_tcf_gact_shot(tc_act)) {
                        actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
                        return 0; /* don't bother with other actions */
                }

                /* Redirect action */
                if (is_tcf_mirred_egress_redirect(tc_act)) {
                        rc = bnxt_tc_parse_redir(bp, actions, tc_act);
                        if (rc)
                                return rc;
                        continue;
                }

                /* Push/pop VLAN */
                if (is_tcf_vlan(tc_act)) {
                        rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
                        if (rc)
                                return rc;
                        continue;
                }

                /* Tunnel encap */
                if (is_tcf_tunnel_set(tc_act)) {
                        rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
                        if (rc)
                                return rc;
                        continue;
                }

                /* Tunnel decap */
                if (is_tcf_tunnel_release(tc_act)) {
                        actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
                        continue;
                }
        }

        if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                        /* dst_fid is PF's fid */
                        actions->dst_fid = bp->pf.fw_fid;
                } else {
                        /* find the FID from dst_dev */
                        actions->dst_fid =
                                bnxt_flow_get_dst_fid(bp, actions->dst_dev);
                        if (actions->dst_fid == BNXT_FID_INVALID)
                                return -EINVAL;
                }
        }

        return 0;
}

#define GET_KEY(flow_cmd, key_type)                                     \
                skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
                                          (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)                                    \
                skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
                                          (flow_cmd)->mask)

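/* Translate the flower match (dissector key/mask pairs) into the driver's
 * bnxt_tc_flow representation, then parse the actions. GET_KEY()/GET_MASK()
 * simply locate a dissector key inside the filter's key and mask blobs via
 * skb_flow_dissector_target().
 */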
static int bnxt_tc_parse_flow(struct bnxt *bp,
                              struct tc_cls_flower_offload *tc_flow_cmd,
                              struct bnxt_tc_flow *flow)
{
        struct flow_dissector *dissector = tc_flow_cmd->dissector;

        /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
        if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
            (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
                netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
                struct flow_dissector_key_basic *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

                flow->l2_key.ether_type = key->n_proto;
                flow->l2_mask.ether_type = mask->n_proto;

                if (key->n_proto == htons(ETH_P_IP) ||
                    key->n_proto == htons(ETH_P_IPV6)) {
                        flow->l4_key.ip_proto = key->ip_proto;
                        flow->l4_mask.ip_proto = mask->ip_proto;
                }
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
                struct flow_dissector_key_eth_addrs *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
                ether_addr_copy(flow->l2_key.dmac, key->dst);
                ether_addr_copy(flow->l2_mask.dmac, mask->dst);
                ether_addr_copy(flow->l2_key.smac, key->src);
                ether_addr_copy(flow->l2_mask.smac, mask->src);
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
                struct flow_dissector_key_vlan *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

                flow->l2_key.inner_vlan_tci =
                        cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
                flow->l2_mask.inner_vlan_tci =
                        cpu_to_be16(VLAN_TCI(mask->vlan_id, mask->vlan_priority));
                flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
                flow->l2_mask.inner_vlan_tpid = htons(0xffff);
                flow->l2_key.num_vlans = 1;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_dissector_key_ipv4_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
                struct flow_dissector_key_ipv4_addrs *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
                flow->l3_key.ipv4.daddr.s_addr = key->dst;
                flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
                flow->l3_key.ipv4.saddr.s_addr = key->src;
                flow->l3_mask.ipv4.saddr.s_addr = mask->src;
        } else if (dissector_uses_key(dissector,
                                      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_dissector_key_ipv6_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
                struct flow_dissector_key_ipv6_addrs *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
                flow->l3_key.ipv6.daddr = key->dst;
                flow->l3_mask.ipv6.daddr = mask->dst;
                flow->l3_key.ipv6.saddr = key->src;
                flow->l3_mask.ipv6.saddr = mask->src;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
                struct flow_dissector_key_ports *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
                flow->l4_key.ports.dport = key->dst;
                flow->l4_mask.ports.dport = mask->dst;
                flow->l4_key.ports.sport = key->src;
                flow->l4_mask.ports.sport = mask->src;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
                struct flow_dissector_key_icmp *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
                struct flow_dissector_key_icmp *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

                flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
                flow->l4_key.icmp.type = key->type;
                flow->l4_key.icmp.code = key->code;
                flow->l4_mask.icmp.type = mask->type;
                flow->l4_mask.icmp.code = mask->code;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
                struct flow_dissector_key_ipv4_addrs *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
                struct flow_dissector_key_ipv4_addrs *mask =
                        GET_MASK(tc_flow_cmd,
                                 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
                flow->tun_key.u.ipv4.dst = key->dst;
                flow->tun_mask.u.ipv4.dst = mask->dst;
                flow->tun_key.u.ipv4.src = key->src;
                flow->tun_mask.u.ipv4.src = mask->src;
        } else if (dissector_uses_key(dissector,
                                      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
                struct flow_dissector_key_keyid *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
                flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
                flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
        }

        if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
                struct flow_dissector_key_ports *mask =
                        GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

                flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
                flow->tun_key.tp_dst = key->dst;
                flow->tun_mask.tp_dst = mask->dst;
                flow->tun_key.tp_src = key->src;
                flow->tun_mask.tp_src = mask->src;
        }

        return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
        struct hwrm_cfa_flow_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
        req.flow_handle = flow_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
                            __func__, flow_handle, rc);
                rc = -EIO;
        }
        return rc;
}

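/* Prefix length of an IPv6 mask, computed by summing the per-32-bit-word
 * prefix lengths; this assumes a contiguous (CIDR-style) mask.
 */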
static int ipv6_mask_len(struct in6_addr *mask)
{
        int mask_len = 0, i;

        for (i = 0; i < 4; i++)
                mask_len += inet_mask_len(mask->s6_addr32[i]);

        return mask_len;
}

static bool is_wildcard(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++) {
                if (p[i] != 0)
                        return false;
        }
        return true;
}

static bool is_exactmatch(void *mask, int len)
{
        const u8 *p = mask;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0xff)
                        return false;

        return true;
}

static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
                                __be16 vlan_tci)
{
        /* VLAN priority must be either exactly zero or fully wildcarded and
         * VLAN id must be exact match.
         */
        if (is_vid_exactmatch(vlan_tci_mask) &&
            ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
              is_vlan_pcp_zero(vlan_tci)) ||
             is_vlan_pcp_wildcarded(vlan_tci_mask)))
                return true;

        return false;
}

static bool bits_set(void *key, int len)
{
        const u8 *p = key;
        int i;

        for (i = 0; i < len; i++)
                if (p[i] != 0)
                        return true;

        return false;
}

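/* Build and send a CFA_FLOW_ALLOC request to the firmware: the parsed
 * match keys become the request's match fields and the parsed actions
 * become CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_*. On success the firmware
 * returns a flow_handle used later to free the flow and query its stats.
 */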
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    __le16 ref_flow_handle,
                                    __le32 tunnel_handle, __le16 *flow_handle)
{
        struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
        struct bnxt_tc_actions *actions = &flow->actions;
        struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
        struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
        struct hwrm_cfa_flow_alloc_input req = { 0 };
        u16 flow_flags = 0, action_flags = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

        req.src_fid = cpu_to_le16(flow->src_fid);
        req.ref_flow_handle = ref_flow_handle;

        if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
            actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
                req.tunnel_handle = tunnel_handle;
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
        }

        req.ethertype = flow->l2_key.ether_type;
        req.ip_proto = flow->l4_key.ip_proto;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
                memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
                memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
        }

        if (flow->l2_key.num_vlans > 0) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
                /* FW expects the inner_vlan_tci value to be set
                 * in outer_vlan_tci when num_vlans is 1 (which is
                 * always the case in TC.)
                 */
                req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
        }

        /* If all IP and L4 fields are wildcarded then this is an L2 flow */
        if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
            is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
                flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
        } else {
                flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
                                CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

                if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
                        req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
                        req.ip_dst_mask_len =
                                inet_mask_len(l3_mask->ipv4.daddr.s_addr);
                        req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
                        req.ip_src_mask_len =
                                inet_mask_len(l3_mask->ipv4.saddr.s_addr);
                } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
                        memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
                               sizeof(req.ip_dst));
                        req.ip_dst_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.daddr);
                        memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
                               sizeof(req.ip_src));
                        req.ip_src_mask_len =
                                        ipv6_mask_len(&l3_mask->ipv6.saddr);
                }
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
                req.l4_src_port = flow->l4_key.ports.sport;
                req.l4_src_port_mask = flow->l4_mask.ports.sport;
                req.l4_dst_port = flow->l4_key.ports.dport;
                req.l4_dst_port_mask = flow->l4_mask.ports.dport;
        } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
                /* l4 ports serve as type/code when ip_proto is ICMP */
                req.l4_src_port = htons(flow->l4_key.icmp.type);
                req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
                req.l4_dst_port = htons(flow->l4_key.icmp.code);
                req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
        }
        req.flags = cpu_to_le16(flow_flags);

        if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
                action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
        } else {
                if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
                        action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
                        req.dst_fid = cpu_to_le16(actions->dst_fid);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
                        req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
                if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
                        action_flags |=
                            CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
                        /* Rewrite config with tpid = 0 implies vlan pop */
                        req.l2_rewrite_vlan_tpid = 0;
                        memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
                        memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
                }
        }
        req.action_flags = cpu_to_le16(action_flags);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *flow_handle = resp->flow_handle;
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
                rc = -ENOSPC;
        else if (rc)
                rc = -EIO;
        return rc;
}

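/* Allocate a decap filter matching the outer headers of a VXLAN-over-UDP/IPv4
 * tunnel: tunnel id, outer dmac/VLAN, outer IP addresses and the UDP dst
 * port, as indicated by the flow's tunnel match flags.
 */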
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
                                       struct bnxt_tc_flow *flow,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 ref_decap_handle,
                                       __le32 *decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
        struct ip_tunnel_key *tun_key = &flow->tun_key;
        u32 enables = 0;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

        req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
                   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
        req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
        req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
                /* tunnel_id is wrongly defined in hsi defn. as __le32 */
                req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
                ether_addr_copy(req.dst_macaddr, l2_info->dmac);
        }
        if (l2_info->num_vlans) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
                req.t_ivlan_vid = l2_info->inner_vlan_tci;
        }

        enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
        req.ethertype = htons(ETH_P_IP);

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
                           CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
                req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
                req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
                req.src_ipaddr[0] = tun_key->u.ipv4.src;
        }

        if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
                enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
                req.dst_port = tun_key->tp_dst;
        }

        /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
         * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
         */
        req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
        req.enables = cpu_to_le32(enables);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *decap_filter_handle = resp->decap_filter_id;
        else
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
                                      __le32 decap_filter_handle)
{
        struct hwrm_cfa_decap_filter_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
        req.decap_filter_id = decap_filter_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
                rc = -EIO;
        }
        return rc;
}

static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
                                       struct ip_tunnel_key *encap_key,
                                       struct bnxt_tc_l2_key *l2_info,
                                       __le32 *encap_record_handle)
{
        struct hwrm_cfa_encap_record_alloc_output *resp =
                                                bp->hwrm_cmd_resp_addr;
        struct hwrm_cfa_encap_record_alloc_input req = { 0 };
        struct hwrm_cfa_encap_data_vxlan *encap =
                        (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
        struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
                                (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

        req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

        ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
        ether_addr_copy(encap->src_mac_addr, l2_info->smac);
        if (l2_info->num_vlans) {
                encap->num_vlan_tags = l2_info->num_vlans;
                encap->ovlan_tci = l2_info->inner_vlan_tci;
                encap->ovlan_tpid = l2_info->inner_vlan_tpid;
        }

        encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
        encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
        encap_ipv4->ttl = encap_key->ttl;

        encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
        encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
        encap_ipv4->protocol = IPPROTO_UDP;

        encap->dst_port = encap_key->tp_dst;
        encap->vni = tunnel_id_to_key32(encap_key->tun_id);

        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc)
                *encap_record_handle = resp->encap_record_id;
        else
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
        mutex_unlock(&bp->hwrm_cmd_lock);

        if (rc)
                rc = -EIO;
        return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
                                      __le32 encap_record_handle)
{
        struct hwrm_cfa_encap_record_free_input req = { 0 };
        int rc;

        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
        req.encap_record_id = encap_record_handle;

        rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (rc) {
                netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
                rc = -EIO;
        }
        return rc;
}

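/* Drop this flow's reference on its shared L2 node; when the last flow
 * sharing the L2 key is gone, the node is removed from the hash table
 * and freed (RCU-deferred).
 */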
static int bnxt_tc_put_l2_node(struct bnxt *bp,
                               struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the L2 shared flow list */
        list_del(&flow_node->l2_list_node);
        if (--l2_node->refcount == 0) {
                rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
                                            tc_info->l2_ht_params);
                if (rc)
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_remove_fast: %d",
                                   __func__, rc);
                kfree_rcu(l2_node, rcu);
        }
        return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
                    struct rhashtable_params ht_params,
                    struct bnxt_tc_l2_key *l2_key)
{
        struct bnxt_tc_l2_node *l2_node;
        int rc;

        l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
        if (!l2_node) {
                l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
                if (!l2_node)
                        return NULL;

                l2_node->key = *l2_key;
                rc = rhashtable_insert_fast(l2_table, &l2_node->node,
                                            ht_params);
                if (rc) {
                        kfree_rcu(l2_node, rcu);
                        netdev_err(bp->dev,
                                   "Error: %s: rhashtable_insert_fast: %d",
                                   __func__, rc);
                        return NULL;
                }
                INIT_LIST_HEAD(&l2_node->common_l2_flows);
        }
        return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                            struct bnxt_tc_flow_node *flow_node,
                            __le16 *ref_flow_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *l2_node;

        l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
                                      tc_info->l2_ht_params,
                                      &flow->l2_key);
        if (!l2_node)
                return -1;

        /* If any other flow is using this l2_node, use its flow_handle
         * as the ref_flow_handle
         */
        if (l2_node->refcount > 0) {
                ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
                                                 struct bnxt_tc_flow_node,
                                                 l2_list_node);
                *ref_flow_handle = ref_flow_node->flow_handle;
        } else {
                *ref_flow_handle = cpu_to_le16(0xffff);
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching l2 key can use the flow_handle of this flow
         * as their ref_flow_handle
         */
        flow_node->l2_node = l2_node;
        list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
        l2_node->refcount++;
        return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
        /* If L4 ports are specified then ip_proto must be TCP or UDP */
        if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
            (flow->l4_key.ip_proto != IPPROTO_TCP &&
             flow->l4_key.ip_proto != IPPROTO_UDP)) {
                netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
                            flow->l4_key.ip_proto);
                return false;
        }

        /* Currently source/dest MAC cannot be partial wildcard */
        if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
            !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
                return false;
        }
        if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
            !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
                return false;
        }

        /* Currently VLAN fields cannot be partial wildcard */
        if (bits_set(&flow->l2_key.inner_vlan_tci,
                     sizeof(flow->l2_key.inner_vlan_tci)) &&
            !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
                                 flow->l2_key.inner_vlan_tci)) {
                netdev_info(bp->dev, "Unsupported VLAN TCI\n");
                return false;
        }
        if (bits_set(&flow->l2_key.inner_vlan_tpid,
                     sizeof(flow->l2_key.inner_vlan_tpid)) &&
            !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
                           sizeof(flow->l2_mask.inner_vlan_tpid))) {
                netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
                return false;
        }

        /* Currently Ethertype must be set */
        if (!is_exactmatch(&flow->l2_mask.ether_type,
                           sizeof(flow->l2_mask.ether_type))) {
                netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
                return false;
        }

        return true;
}

/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
                                   struct rhashtable *tunnel_table,
                                   struct rhashtable_params *ht_params,
                                   struct bnxt_tc_tunnel_node *tunnel_node)
{
        int rc;

        if (--tunnel_node->refcount == 0) {
                rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
                                            *ht_params);
                if (rc) {
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                        rc = -1;
                }
                kfree_rcu(tunnel_node, rcu);
                return rc;
        } else {
                return tunnel_node->refcount;
        }
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
                        struct rhashtable_params *ht_params,
                        struct ip_tunnel_key *tun_key)
{
        struct bnxt_tc_tunnel_node *tunnel_node;
        int rc;

        tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
        if (!tunnel_node) {
                tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
                if (!tunnel_node) {
                        rc = -ENOMEM;
                        goto err;
                }

                tunnel_node->key = *tun_key;
                tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
                rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
                                            *ht_params);
                if (rc) {
                        kfree_rcu(tunnel_node, rcu);
                        goto err;
                }
        }
        tunnel_node->refcount++;
        return tunnel_node;
err:
        netdev_info(bp->dev, "error rc=%d", rc);
        return NULL;
}

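/* Like bnxt_tc_get_ref_flow_handle(), but for decap: if another flow
 * already uses the same tunnel L2 key, return that flow's decap filter
 * handle so the firmware can share the underlying L2 context.
 */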
static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
                                        struct bnxt_tc_flow *flow,
                                        struct bnxt_tc_l2_key *l2_key,
                                        struct bnxt_tc_flow_node *flow_node,
                                        __le32 *ref_decap_handle)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_flow_node *ref_flow_node;
        struct bnxt_tc_l2_node *decap_l2_node;

        decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
                                            tc_info->decap_l2_ht_params,
                                            l2_key);
        if (!decap_l2_node)
                return -1;

        /* If any other flow is using this decap_l2_node, use its decap_handle
         * as the ref_decap_handle
         */
        if (decap_l2_node->refcount > 0) {
                ref_flow_node =
                        list_first_entry(&decap_l2_node->common_l2_flows,
                                         struct bnxt_tc_flow_node,
                                         decap_l2_list_node);
                *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
        } else {
                *ref_decap_handle = INVALID_TUNNEL_HANDLE;
        }

        /* Insert the l2_node into the flow_node so that subsequent flows
         * with a matching decap l2 key can use the decap_filter_handle of
         * this flow as their ref_decap_handle
         */
        flow_node->decap_l2_node = decap_l2_node;
        list_add(&flow_node->decap_l2_list_node,
                 &decap_l2_node->common_l2_flows);
        decap_l2_node->refcount++;
        return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
                                      struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* remove flow_node from the decap L2 sharing flow list */
        list_del(&flow_node->decap_l2_list_node);
        if (--decap_l2_node->refcount == 0) {
                rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
                                            &decap_l2_node->node,
                                            tc_info->decap_l2_ht_params);
                if (rc)
                        netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
                kfree_rcu(decap_l2_node, rcu);
        }
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow_node *flow_node)
{
        __le32 decap_handle = flow_node->decap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        if (flow_node->decap_l2_node)
                bnxt_tc_put_decap_l2_node(bp, flow_node);

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                     &tc_info->decap_ht_params,
                                     flow_node->decap_node);
        if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_decap_filter_free(bp, decap_handle);
}

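/* Resolve the L2 headers needed for a tunnel endpoint: route the tunnel
 * destination (which must egress via the PF uplink, possibly through a
 * VLAN upper device), then look up the neighbour to get the next-hop
 * MAC address.
 */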
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
                                       struct ip_tunnel_key *tun_key,
                                       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
        struct net_device *real_dst_dev = bp->dev;
        struct flowi4 flow = { {0} };
        struct net_device *dst_dev;
        struct neighbour *nbr;
        struct rtable *rt;
        int rc;

        flow.flowi4_proto = IPPROTO_UDP;
        flow.fl4_dport = tun_key->tp_dst;
        flow.daddr = tun_key->u.ipv4.dst;

        rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
        if (IS_ERR(rt)) {
                netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
                return -EOPNOTSUPP;
        }

        /* The route must either point to the real_dst_dev or a dst_dev that
         * uses the real_dst_dev.
         */
        dst_dev = rt->dst.dev;
        if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
                struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

                if (vlan->real_dev != real_dst_dev) {
                        netdev_info(bp->dev,
                                    "dst_dev(%s) doesn't use PF-if(%s)",
                                    netdev_name(dst_dev),
                                    netdev_name(real_dst_dev));
                        rc = -EOPNOTSUPP;
                        goto put_rt;
                }
                l2_info->inner_vlan_tci = htons(vlan->vlan_id);
                l2_info->inner_vlan_tpid = vlan->vlan_proto;
                l2_info->num_vlans = 1;
#endif
        } else if (dst_dev != real_dst_dev) {
                netdev_info(bp->dev,
                            "dst_dev(%s) for %pI4b is not PF-if(%s)",
                            netdev_name(dst_dev), &flow.daddr,
                            netdev_name(real_dst_dev));
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
        if (!nbr) {
                netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
                            &flow.daddr);
                rc = -EOPNOTSUPP;
                goto put_rt;
        }

        tun_key->u.ipv4.src = flow.saddr;
        tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
        neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
        ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
        neigh_release(nbr);
        ip_rt_put(rt);

        return 0;
put_rt:
        ip_rt_put(rt);
        return rc;
#else
        return -EOPNOTSUPP;
#endif
}

static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *decap_filter_handle)
{
        struct ip_tunnel_key *decap_key = &flow->tun_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_l2_key l2_info = { {0} };
        struct bnxt_tc_tunnel_node *decap_node;
        struct ip_tunnel_key tun_key = { 0 };
        struct bnxt_tc_l2_key *decap_l2_info;
        __le32 ref_decap_handle;
        int rc;

        /* Check if there's another flow using the same tunnel decap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields. Ignore src_port in the tunnel_key,
         * since it is not required for decap filters.
         */
        decap_key->tp_src = 0;
        decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
                                             &tc_info->decap_ht_params,
                                             decap_key);
        if (!decap_node)
                return -ENOMEM;

        flow_node->decap_node = decap_node;

        if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        /* Resolve the L2 fields for tunnel decap:
         * resolve the route for the remote VTEP (saddr) of the decap key
         * and find its next-hop MAC address.
         */
        tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
        tun_key.tp_dst = flow->tun_key.tp_dst;
        rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
        if (rc)
                goto put_decap;

        decap_l2_info = &decap_node->l2_info;
        /* decap smac is wildcarded */
        ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
        if (l2_info.num_vlans) {
                decap_l2_info->num_vlans = l2_info.num_vlans;
                decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
                decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
        }
        flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

        /* For getting a decap_filter_handle we first need to check if
         * there are any other decap flows that share the same tunnel L2
         * key and if so, pass that flow's decap_filter_handle as the
         * ref_decap_handle for this flow.
         */
        rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
                                          &ref_decap_handle);
        if (rc)
                goto put_decap;

        /* Issue the hwrm cmd to allocate a decap filter handle */
        rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
                                         ref_decap_handle,
                                         &decap_node->tunnel_handle);
        if (rc)
                goto put_decap_l2;

done:
        *decap_filter_handle = decap_node->tunnel_handle;
        return 0;

put_decap_l2:
        bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
                                &tc_info->decap_ht_params,
                                flow_node->decap_node);
        return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
                                     struct bnxt_tc_tunnel_node *encap_node)
{
        __le32 encap_handle = encap_node->tunnel_handle;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                     &tc_info->encap_ht_params, encap_node);
        if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
                hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                    struct bnxt_tc_flow_node *flow_node,
                                    __le32 *encap_handle)
{
        struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
        struct bnxt_tc_info *tc_info = bp->tc_info;
        struct bnxt_tc_tunnel_node *encap_node;
        int rc;

        /* Check if there's another flow using the same tunnel encap.
         * If not, add this tunnel to the table and resolve the other
         * tunnel header fields.
         */
        encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
                                             &tc_info->encap_ht_params,
                                             encap_key);
        if (!encap_node)
                return -ENOMEM;

        flow_node->encap_node = encap_node;

        if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
                goto done;

        rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
        if (rc)
                goto put_encap;

        /* Allocate a new tunnel encap record */
        rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
                                         &encap_node->tunnel_handle);
        if (rc)
                goto put_encap;

done:
        *encap_handle = encap_node->tunnel_handle;
        return 0;

put_encap:
        bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
                                &tc_info->encap_ht_params, encap_node);
        return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
                                      struct bnxt_tc_flow *flow,
                                      struct bnxt_tc_flow_node *flow_node)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                bnxt_tc_put_decap_handle(bp, flow_node);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
                                     struct bnxt_tc_flow *flow,
                                     struct bnxt_tc_flow_node *flow_node,
                                     __le32 *tunnel_handle)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                return bnxt_tc_get_decap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
                return bnxt_tc_get_encap_handle(bp, flow, flow_node,
                                                tunnel_handle);
        else
                return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
                              struct bnxt_tc_flow_node *flow_node)
{
        struct bnxt_tc_info *tc_info = bp->tc_info;
        int rc;

        /* send HWRM cmd to free the flow-id */
        bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

        mutex_lock(&tc_info->lock);

        /* release references to any tunnel encap/decap nodes */
        bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

        /* release reference to l2 node */
        bnxt_tc_put_l2_node(bp, flow_node);

        mutex_unlock(&tc_info->lock);

        rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
                                    tc_info->flow_ht_params);
        if (rc)
                netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
                           __func__, rc);

        kfree_rcu(flow_node, rcu);
        return 0;
}

static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
                                u16 src_fid)
{
        if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
                flow->src_fid = bp->pf.fw_fid;
        else
                flow->src_fid = src_fid;
}

1258 /* Add a new flow or replace an existing flow.
1259  * Notes on locking:
1260  * There are essentially two critical sections here.
1261  * 1. while adding a new flow
1262  *    a) lookup l2-key
1263  *    b) issue HWRM cmd and get flow_handle
1264  *    c) link l2-key with flow
1265  * 2. while deleting a flow
1266  *    a) unlinking l2-key from flow
1267  * A lock is needed to protect these two critical sections.
1268  *
1269  * The hash-tables are already protected by the rhashtable API.
1270  */
1271 static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1272                             struct tc_cls_flower_offload *tc_flow_cmd)
1273 {
1274         struct bnxt_tc_flow_node *new_node, *old_node;
1275         struct bnxt_tc_info *tc_info = bp->tc_info;
1276         struct bnxt_tc_flow *flow;
1277         __le32 tunnel_handle = 0;
1278         __le16 ref_flow_handle;
1279         int rc;
1280
1281         /* allocate memory for the new flow and it's node */
1282         new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
1283         if (!new_node) {
1284                 rc = -ENOMEM;
1285                 goto done;
1286         }
1287         new_node->cookie = tc_flow_cmd->cookie;
1288         flow = &new_node->flow;
1289
1290         rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
1291         if (rc)
1292                 goto free_node;
1293
1294         bnxt_tc_set_src_fid(bp, flow, src_fid);
1295
1296         if (!bnxt_tc_can_offload(bp, flow)) {
1297                 rc = -ENOSPC;
1298                 goto free_node;
1299         }
1300
1301         /* If a flow exists with the same cookie, delete it */
1302         old_node = rhashtable_lookup_fast(&tc_info->flow_table,
1303                                           &tc_flow_cmd->cookie,
1304                                           tc_info->flow_ht_params);
1305         if (old_node)
1306                 __bnxt_tc_del_flow(bp, old_node);
1307
        /* Check if the L2 part of the flow has been offloaded already.
         * If so, bump up its refcnt and get its reference handle.
         */
1311         mutex_lock(&tc_info->lock);
1312         rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
1313         if (rc)
1314                 goto unlock;
1315
1316         /* If the flow involves tunnel encap/decap, get tunnel_handle */
1317         rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
1318         if (rc)
1319                 goto put_l2;
1320
1321         /* send HWRM cmd to alloc the flow */
1322         rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
1323                                       tunnel_handle, &new_node->flow_handle);
1324         if (rc)
1325                 goto put_tunnel;
1326
1327         flow->lastused = jiffies;
1328         spin_lock_init(&flow->stats_lock);
1329         /* add new flow to flow-table */
1330         rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
1331                                     tc_info->flow_ht_params);
1332         if (rc)
1333                 goto hwrm_flow_free;
1334
1335         mutex_unlock(&tc_info->lock);
1336         return 0;
1337
1338 hwrm_flow_free:
1339         bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
1340 put_tunnel:
1341         bnxt_tc_put_tunnel_handle(bp, flow, new_node);
1342 put_l2:
1343         bnxt_tc_put_l2_node(bp, new_node);
1344 unlock:
1345         mutex_unlock(&tc_info->lock);
1346 free_node:
1347         kfree_rcu(new_node, rcu);
1348 done:
        netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
                   __func__, tc_flow_cmd->cookie, rc);
1351         return rc;
1352 }
1353
1354 static int bnxt_tc_del_flow(struct bnxt *bp,
1355                             struct tc_cls_flower_offload *tc_flow_cmd)
1356 {
1357         struct bnxt_tc_info *tc_info = bp->tc_info;
1358         struct bnxt_tc_flow_node *flow_node;
1359
1360         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1361                                            &tc_flow_cmd->cookie,
1362                                            tc_info->flow_ht_params);
1363         if (!flow_node)
1364                 return -EINVAL;
1365
1366         return __bnxt_tc_del_flow(bp, flow_node);
1367 }
1368
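/* Report stats to TC as deltas relative to the previous query: e.g. if
 * the accumulated counters read 150 packets now and 100 were reported
 * at the last query, 50 packets are pushed up and prev_stats advances
 * to 150.
 */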
1369 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
1370                                   struct tc_cls_flower_offload *tc_flow_cmd)
1371 {
1372         struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
1373         struct bnxt_tc_info *tc_info = bp->tc_info;
1374         struct bnxt_tc_flow_node *flow_node;
1375         struct bnxt_tc_flow *flow;
1376         unsigned long lastused;
1377
1378         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1379                                            &tc_flow_cmd->cookie,
1380                                            tc_info->flow_ht_params);
1381         if (!flow_node)
                return -EINVAL;
1383
1384         flow = &flow_node->flow;
1385         curr_stats = &flow->stats;
1386         prev_stats = &flow->prev_stats;
1387
1388         spin_lock(&flow->stats_lock);
1389         stats.packets = curr_stats->packets - prev_stats->packets;
1390         stats.bytes = curr_stats->bytes - prev_stats->bytes;
1391         *prev_stats = *curr_stats;
1392         lastused = flow->lastused;
1393         spin_unlock(&flow->stats_lock);
1394
1395         tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
1396                               lastused);
1397         return 0;
1398 }
1399
1400 static int
1401 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
1402                              struct bnxt_tc_stats_batch stats_batch[])
1403 {
1404         struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
1405         struct hwrm_cfa_flow_stats_input req = { 0 };
1406         __le16 *req_flow_handles = &req.flow_handle_0;
1407         int rc, i;
1408
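        /* The firmware request carries the batch of flow handles in
         * consecutive flow_handle_* fields, so they can be filled by
         * indexing off the first one.
         */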
1409         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
1410         req.num_flows = cpu_to_le16(num_flows);
1411         for (i = 0; i < num_flows; i++) {
1412                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1413
1414                 req_flow_handles[i] = flow_node->flow_handle;
1415         }
1416
1417         mutex_lock(&bp->hwrm_cmd_lock);
1418         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1419         if (!rc) {
1420                 __le64 *resp_packets = &resp->packet_0;
1421                 __le64 *resp_bytes = &resp->byte_0;
1422
1423                 for (i = 0; i < num_flows; i++) {
1424                         stats_batch[i].hw_stats.packets =
1425                                                 le64_to_cpu(resp_packets[i]);
1426                         stats_batch[i].hw_stats.bytes =
1427                                                 le64_to_cpu(resp_bytes[i]);
1428                 }
1429         } else {
                netdev_info(bp->dev, "error rc=%d\n", rc);
1431         }
1432         mutex_unlock(&bp->hwrm_cmd_lock);
1433
1434         if (rc)
1435                 rc = -EIO;
1436         return rc;
1437 }
1438
/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask, and it wraps around beyond that width.
 */
1443 static void accumulate_val(u64 *accum, u64 val, u64 mask)
1444 {
1445 #define low_bits(x, mask)               ((x) & (mask))
1446 #define high_bits(x, mask)              ((x) & ~(mask))
1447         bool wrapped = val < low_bits(*accum, mask);
1448
1449         *accum = high_bits(*accum, mask) + val;
1450         if (wrapped)
1451                 *accum += (mask + 1);
1452 }
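
/* Worked example, assuming an 8-bit counter (mask = 0xff): if *accum is
 * 0x1fe and the HW now reports val = 0x03, then low_bits(*accum) = 0xfe
 * and val < 0xfe, so the counter wrapped. The new value becomes
 * high_bits(0x1fe) + 0x03 + (0xff + 1) = 0x100 + 0x03 + 0x100 = 0x203,
 * accounting for the five events between 0xfe and 0x03.
 */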
1453
/* The HW counters' width is much less than 64 bits.
 * Handle possible wrap-around while updating the stat counters.
 */
1457 static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
1458                                   struct bnxt_tc_flow_stats *acc_stats,
1459                                   struct bnxt_tc_flow_stats *hw_stats)
1460 {
1461         accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
1462         accumulate_val(&acc_stats->packets, hw_stats->packets,
1463                        tc_info->packets_mask);
1464 }
1465
1466 static int
1467 bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
1468                                 struct bnxt_tc_stats_batch stats_batch[])
1469 {
1470         struct bnxt_tc_info *tc_info = bp->tc_info;
1471         int rc, i;
1472
1473         rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
1474         if (rc)
1475                 return rc;
1476
1477         for (i = 0; i < num_flows; i++) {
1478                 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1479                 struct bnxt_tc_flow *flow = &flow_node->flow;
1480
1481                 spin_lock(&flow->stats_lock);
1482                 bnxt_flow_stats_accum(tc_info, &flow->stats,
1483                                       &stats_batch[i].hw_stats);
1484                 if (flow->stats.packets != flow->prev_stats.packets)
1485                         flow->lastused = jiffies;
1486                 spin_unlock(&flow->stats_lock);
1487         }
1488
1489         return 0;
1490 }
1491
1492 static int
1493 bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
1494                               struct bnxt_tc_stats_batch stats_batch[],
1495                               int *num_flows)
1496 {
1497         struct bnxt_tc_info *tc_info = bp->tc_info;
1498         struct rhashtable_iter *iter = &tc_info->iter;
1499         void *flow_node;
1500         int rc, i;
1501
1502         rhashtable_walk_start(iter);
1503
1504         rc = 0;
1505         for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
1506                 flow_node = rhashtable_walk_next(iter);
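                /* -EAGAIN means the table is being resized and the
                 * walk must restart from the beginning; drop the
                 * partially filled batch, as entries may be revisited.
                 */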
                if (IS_ERR(flow_node)) {
                        i = 0;
                        if (PTR_ERR(flow_node) == -EAGAIN)
                                continue;
                        rc = PTR_ERR(flow_node);
                        goto done;
                }
1516
1517                 /* No more flows */
1518                 if (!flow_node)
1519                         goto done;
1520
1521                 stats_batch[i].flow_node = flow_node;
1522         }
1523 done:
1524         rhashtable_walk_stop(iter);
1525         *num_flows = i;
1526         return rc;
1527 }
1528
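/* Expected to run periodically; each pass walks the flow table in
 * fixed-size batches and folds the HW readings into the 64-bit
 * software accumulators.
 */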
1529 void bnxt_tc_flow_stats_work(struct bnxt *bp)
1530 {
1531         struct bnxt_tc_info *tc_info = bp->tc_info;
1532         int num_flows, rc;
1533
1534         num_flows = atomic_read(&tc_info->flow_table.nelems);
1535         if (!num_flows)
1536                 return;
1537
1538         rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
1539
1540         for (;;) {
1541                 rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
1542                                                    &num_flows);
1543                 if (rc) {
1544                         if (rc == -EAGAIN)
1545                                 continue;
1546                         break;
1547                 }
1548
1549                 if (!num_flows)
1550                         break;
1551
1552                 bnxt_tc_flow_stats_batch_update(bp, num_flows,
1553                                                 tc_info->stats_batch);
1554         }
1555
1556         rhashtable_walk_exit(&tc_info->iter);
1557 }
1558
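/* Entry point for TC flower offload. A minimal sketch of a filter that
 * exercises the REPLACE path (interface names are illustrative only):
 *
 *   tc qdisc add dev ens1f0 ingress
 *   tc filter add dev ens1f0 protocol ip ingress \
 *      flower skip_sw ip_proto tcp dst_port 80 \
 *      action mirred egress redirect dev ens1f0_0
 *
 * DESTROY and STATS commands for the same cookie are routed to
 * bnxt_tc_del_flow() and bnxt_tc_get_flow_stats() respectively.
 */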
1559 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
1560                          struct tc_cls_flower_offload *cls_flower)
1561 {
1562         switch (cls_flower->command) {
1563         case TC_CLSFLOWER_REPLACE:
1564                 return bnxt_tc_add_flow(bp, src_fid, cls_flower);
1565         case TC_CLSFLOWER_DESTROY:
1566                 return bnxt_tc_del_flow(bp, cls_flower);
1567         case TC_CLSFLOWER_STATS:
1568                 return bnxt_tc_get_flow_stats(bp, cls_flower);
1569         default:
1570                 return -EOPNOTSUPP;
1571         }
1572 }
1573
1574 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
1575         .head_offset = offsetof(struct bnxt_tc_flow_node, node),
1576         .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
1577         .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
1578         .automatic_shrinking = true
1579 };
1580
1581 static const struct rhashtable_params bnxt_tc_l2_ht_params = {
1582         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1583         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1584         .key_len = BNXT_TC_L2_KEY_LEN,
1585         .automatic_shrinking = true
1586 };
1587
1588 static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
1589         .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1590         .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1591         .key_len = BNXT_TC_L2_KEY_LEN,
1592         .automatic_shrinking = true
1593 };
1594
1595 static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
1596         .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
1597         .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
1598         .key_len = sizeof(struct ip_tunnel_key),
1599         .automatic_shrinking = true
1600 };
1601
1602 /* convert counter width in bits to a mask */
1603 #define mask(width)             ((u64)~0 >> (64 - (width)))
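/* e.g. mask(36) = 0xfffffffff (36 ones) and mask(28) = 0xfffffff,
 * matching the byte and packet counter widths programmed below.
 */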
1604
1605 int bnxt_init_tc(struct bnxt *bp)
1606 {
1607         struct bnxt_tc_info *tc_info;
1608         int rc;
1609
1610         if (bp->hwrm_spec_code < 0x10803) {
1611                 netdev_warn(bp->dev,
1612                             "Firmware does not support TC flower offload.\n");
                return -EOPNOTSUPP;
1614         }
1615
1616         tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
1617         if (!tc_info)
1618                 return -ENOMEM;
1619         mutex_init(&tc_info->lock);
1620
1621         /* Counter widths are programmed by FW */
1622         tc_info->bytes_mask = mask(36);
1623         tc_info->packets_mask = mask(28);
1624
1625         tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
1626         rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
1627         if (rc)
1628                 goto free_tc_info;
1629
1630         tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
1631         rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
1632         if (rc)
1633                 goto destroy_flow_table;
1634
1635         tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
1636         rc = rhashtable_init(&tc_info->decap_l2_table,
1637                              &tc_info->decap_l2_ht_params);
1638         if (rc)
1639                 goto destroy_l2_table;
1640
1641         tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
1642         rc = rhashtable_init(&tc_info->decap_table,
1643                              &tc_info->decap_ht_params);
1644         if (rc)
1645                 goto destroy_decap_l2_table;
1646
1647         tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
1648         rc = rhashtable_init(&tc_info->encap_table,
1649                              &tc_info->encap_ht_params);
1650         if (rc)
1651                 goto destroy_decap_table;
1652
1653         tc_info->enabled = true;
1654         bp->dev->hw_features |= NETIF_F_HW_TC;
1655         bp->dev->features |= NETIF_F_HW_TC;
1656         bp->tc_info = tc_info;
1657         return 0;
1658
1659 destroy_decap_table:
1660         rhashtable_destroy(&tc_info->decap_table);
1661 destroy_decap_l2_table:
1662         rhashtable_destroy(&tc_info->decap_l2_table);
1663 destroy_l2_table:
1664         rhashtable_destroy(&tc_info->l2_table);
1665 destroy_flow_table:
1666         rhashtable_destroy(&tc_info->flow_table);
1667 free_tc_info:
1668         kfree(tc_info);
1669         return rc;
1670 }
1671
1672 void bnxt_shutdown_tc(struct bnxt *bp)
1673 {
1674         struct bnxt_tc_info *tc_info = bp->tc_info;
1675
1676         if (!bnxt_tc_flower_enabled(bp))
1677                 return;
1678
1679         rhashtable_destroy(&tc_info->flow_table);
1680         rhashtable_destroy(&tc_info->l2_table);
1681         rhashtable_destroy(&tc_info->decap_l2_table);
1682         rhashtable_destroy(&tc_info->decap_table);
1683         rhashtable_destroy(&tc_info->encap_table);
1684         kfree(tc_info);
1685         bp->tc_info = NULL;
1686 }