1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2017 Broadcom Limited
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
10 #include <linux/netdevice.h>
11 #include <linux/inetdevice.h>
12 #include <linux/if_vlan.h>
13 #include <net/flow_dissector.h>
14 #include <net/pkt_cls.h>
15 #include <net/tc_act/tc_gact.h>
16 #include <net/tc_act/tc_skbedit.h>
17 #include <net/tc_act/tc_mirred.h>
18 #include <net/tc_act/tc_vlan.h>
19 #include <net/tc_act/tc_tunnel_key.h>
23 #include "bnxt_sriov.h"
27 #define BNXT_FID_INVALID 0xffff
28 #define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
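/* A worked example of the TCI layout built by VLAN_TCI(): the 3-bit
 * priority sits above the 12-bit VLAN id (VLAN_PRIO_SHIFT == 13), so
 * VLAN_TCI(5, 3) == 5 | (3 << 13) == 0x6005.
 */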
30 #define is_vlan_pcp_wildcarded(vlan_tci_mask) \
31 ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
32 #define is_vlan_pcp_exactmatch(vlan_tci_mask) \
33 ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
34 #define is_vlan_pcp_zero(vlan_tci) \
35 ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
36 #define is_vid_exactmatch(vlan_tci_mask) \
37 ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
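/* A few examples of how these predicates classify a TCI mask (values
 * shown in host order, i.e. after the ntohs()):
 *   0xffff -> VID exact-match, PCP exact-match
 *   0x0fff -> VID exact-match, PCP wildcarded
 *   0x2fff -> VID exact-match, PCP partially masked (rejected by
 *             is_vlan_tci_allowed() further below)
 */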
39 /* Return the dst fid of the func for flow forwarding
40 * For PFs: src_fid is the fid of the PF
41 * For VF-reps: src_fid is the fid of the VF
43 static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
47 /* check if dev belongs to the same switch */
48 if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
49 netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
51 return BNXT_FID_INVALID;
54 /* Is dev a VF-rep? */
55 if (bnxt_dev_is_vf_rep(dev))
56 return bnxt_vf_rep_get_fid(dev);
58 bp = netdev_priv(dev);
62 static int bnxt_tc_parse_redir(struct bnxt *bp,
63 struct bnxt_tc_actions *actions,
64 const struct tc_action *tc_act)
66 struct net_device *dev = tcf_mirred_dev(tc_act);
69 netdev_info(bp->dev, "no dev in mirred action");
73 actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
74 actions->dst_dev = dev;
78 static int bnxt_tc_parse_vlan(struct bnxt *bp,
79 struct bnxt_tc_actions *actions,
80 const struct tc_action *tc_act)
82 switch (tcf_vlan_action(tc_act)) {
83 case TCA_VLAN_ACT_POP:
84 actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
86 case TCA_VLAN_ACT_PUSH:
87 actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
88 actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
89 actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
97 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
98 struct bnxt_tc_actions *actions,
99 const struct tc_action *tc_act)
101 struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
102 struct ip_tunnel_key *tun_key = &tun_info->key;
104 if (ip_tunnel_info_af(tun_info) != AF_INET) {
105 netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
109 actions->tun_encap_key = *tun_key;
110 actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
114 static int bnxt_tc_parse_actions(struct bnxt *bp,
115 struct bnxt_tc_actions *actions,
116 struct tcf_exts *tc_exts)
118 const struct tc_action *tc_act;
121 if (!tcf_exts_has_actions(tc_exts)) {
122 netdev_info(bp->dev, "no actions");
126 tcf_exts_for_each_action(i, tc_act, tc_exts) {
128 if (is_tcf_gact_shot(tc_act)) {
129 actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
130 return 0; /* don't bother with other actions */
133 /* Redirect action */
134 if (is_tcf_mirred_egress_redirect(tc_act)) {
135 rc = bnxt_tc_parse_redir(bp, actions, tc_act);
142 if (is_tcf_vlan(tc_act)) {
143 rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
150 if (is_tcf_tunnel_set(tc_act)) {
151 rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
158 if (is_tcf_tunnel_release(tc_act)) {
159 actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
164 if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
165 if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
166 /* dst_fid is PF's fid */
167 actions->dst_fid = bp->pf.fw_fid;
169 /* find the FID from dst_dev */
171 bnxt_flow_get_dst_fid(bp, actions->dst_dev);
172 if (actions->dst_fid == BNXT_FID_INVALID)
180 #define GET_KEY(flow_cmd, key_type) \
181 skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
182 (flow_cmd)->key)
183 #define GET_MASK(flow_cmd, key_type) \
184 skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
185 (flow_cmd)->mask)
187 static int bnxt_tc_parse_flow(struct bnxt *bp,
188 struct tc_cls_flower_offload *tc_flow_cmd,
189 struct bnxt_tc_flow *flow)
191 struct flow_dissector *dissector = tc_flow_cmd->dissector;
193 /* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
194 if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
195 (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
196 netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
197 dissector->used_keys);
201 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
202 struct flow_dissector_key_basic *key =
203 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
204 struct flow_dissector_key_basic *mask =
205 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
207 flow->l2_key.ether_type = key->n_proto;
208 flow->l2_mask.ether_type = mask->n_proto;
210 if (key->n_proto == htons(ETH_P_IP) ||
211 key->n_proto == htons(ETH_P_IPV6)) {
212 flow->l4_key.ip_proto = key->ip_proto;
213 flow->l4_mask.ip_proto = mask->ip_proto;
217 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
218 struct flow_dissector_key_eth_addrs *key =
219 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
220 struct flow_dissector_key_eth_addrs *mask =
221 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
223 flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
224 ether_addr_copy(flow->l2_key.dmac, key->dst);
225 ether_addr_copy(flow->l2_mask.dmac, mask->dst);
226 ether_addr_copy(flow->l2_key.smac, key->src);
227 ether_addr_copy(flow->l2_mask.smac, mask->src);
230 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
231 struct flow_dissector_key_vlan *key =
232 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
233 struct flow_dissector_key_vlan *mask =
234 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
236 flow->l2_key.inner_vlan_tci =
237 cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
238 flow->l2_mask.inner_vlan_tci =
239 cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
240 flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
241 flow->l2_mask.inner_vlan_tpid = htons(0xffff);
242 flow->l2_key.num_vlans = 1;
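/* For example, a flower match on vlan_id 100, prio 0 yields
 * inner_vlan_tci == cpu_to_be16(0x0064) here.
 */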
245 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
246 struct flow_dissector_key_ipv4_addrs *key =
247 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
248 struct flow_dissector_key_ipv4_addrs *mask =
249 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
251 flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
252 flow->l3_key.ipv4.daddr.s_addr = key->dst;
253 flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
254 flow->l3_key.ipv4.saddr.s_addr = key->src;
255 flow->l3_mask.ipv4.saddr.s_addr = mask->src;
256 } else if (dissector_uses_key(dissector,
257 FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
258 struct flow_dissector_key_ipv6_addrs *key =
259 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
260 struct flow_dissector_key_ipv6_addrs *mask =
261 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
263 flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
264 flow->l3_key.ipv6.daddr = key->dst;
265 flow->l3_mask.ipv6.daddr = mask->dst;
266 flow->l3_key.ipv6.saddr = key->src;
267 flow->l3_mask.ipv6.saddr = mask->src;
270 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
271 struct flow_dissector_key_ports *key =
272 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
273 struct flow_dissector_key_ports *mask =
274 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
276 flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
277 flow->l4_key.ports.dport = key->dst;
278 flow->l4_mask.ports.dport = mask->dst;
279 flow->l4_key.ports.sport = key->src;
280 flow->l4_mask.ports.sport = mask->src;
283 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
284 struct flow_dissector_key_icmp *key =
285 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
286 struct flow_dissector_key_icmp *mask =
287 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
289 flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
290 flow->l4_key.icmp.type = key->type;
291 flow->l4_key.icmp.code = key->code;
292 flow->l4_mask.icmp.type = mask->type;
293 flow->l4_mask.icmp.code = mask->code;
296 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
297 struct flow_dissector_key_ipv4_addrs *key =
298 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
299 struct flow_dissector_key_ipv4_addrs *mask =
300 GET_MASK(tc_flow_cmd,
301 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
303 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
304 flow->tun_key.u.ipv4.dst = key->dst;
305 flow->tun_mask.u.ipv4.dst = mask->dst;
306 flow->tun_key.u.ipv4.src = key->src;
307 flow->tun_mask.u.ipv4.src = mask->src;
308 } else if (dissector_uses_key(dissector,
309 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
313 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
314 struct flow_dissector_key_keyid *key =
315 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
316 struct flow_dissector_key_keyid *mask =
317 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
319 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
320 flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
321 flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
324 if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
325 struct flow_dissector_key_ports *key =
326 GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
327 struct flow_dissector_key_ports *mask =
328 GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
330 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
331 flow->tun_key.tp_dst = key->dst;
332 flow->tun_mask.tp_dst = mask->dst;
333 flow->tun_key.tp_src = key->src;
334 flow->tun_mask.tp_src = mask->src;
337 return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
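/* For reference, a flow of this shape could come from a tc command like:
 *   tc filter add dev <uplink/vf-rep> ingress protocol ip flower \
 *       ip_proto tcp dst_port 80 action drop
 * (illustrative only; any flower match/action mix lands in this parser).
 */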
340 static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
342 struct hwrm_cfa_flow_free_input req = { 0 };
345 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
346 req.flow_handle = flow_handle;
348 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
350 netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
351 __func__, flow_handle, rc);
358 static int ipv6_mask_len(struct in6_addr *mask)
362 for (i = 0; i < 4; i++)
363 mask_len += inet_mask_len(mask->s6_addr32[i]);
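/* Sums the per-32-bit-word prefix lengths; this assumes a contiguous
 * mask, e.g. ffff:ffff:ffff:ffff:: yields 64.
 */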
368 static bool is_wildcard(void *mask, int len)
373 for (i = 0; i < len; i++) {
380 static bool is_exactmatch(void *mask, int len)
385 for (i = 0; i < len; i++)
392 static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
395 /* VLAN priority must be either exactly zero or fully wildcarded and
396 * VLAN id must be exact match.
398 if (is_vid_exactmatch(vlan_tci_mask) &&
399 ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
400 is_vlan_pcp_zero(vlan_tci)) ||
401 is_vlan_pcp_wildcarded(vlan_tci_mask)))
407 static bool bits_set(void *key, int len)
412 for (i = 0; i < len; i++)
419 static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
420 __le16 ref_flow_handle,
421 __le32 tunnel_handle, __le16 *flow_handle)
423 struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
424 struct bnxt_tc_actions *actions = &flow->actions;
425 struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
426 struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
427 struct hwrm_cfa_flow_alloc_input req = { 0 };
428 u16 flow_flags = 0, action_flags = 0;
431 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);
433 req.src_fid = cpu_to_le16(flow->src_fid);
434 req.ref_flow_handle = ref_flow_handle;
436 if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
437 actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
438 req.tunnel_handle = tunnel_handle;
439 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
440 action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
443 req.ethertype = flow->l2_key.ether_type;
444 req.ip_proto = flow->l4_key.ip_proto;
446 if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
447 memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
448 memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
451 if (flow->l2_key.num_vlans > 0) {
452 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
453 /* FW expects the inner_vlan_tci value to be set
454 * in outer_vlan_tci when num_vlans is 1 (which is
455 * always the case in TC).
457 req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
460 /* If all IP and L4 fields are wildcarded then this is an L2 flow */
461 if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
462 is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
463 flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
465 flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
466 CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
467 CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;
469 if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
470 req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
471 req.ip_dst_mask_len =
472 inet_mask_len(l3_mask->ipv4.daddr.s_addr);
473 req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
474 req.ip_src_mask_len =
475 inet_mask_len(l3_mask->ipv4.saddr.s_addr);
476 } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
477 memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
479 req.ip_dst_mask_len =
480 ipv6_mask_len(&l3_mask->ipv6.daddr);
481 memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
483 req.ip_src_mask_len =
484 ipv6_mask_len(&l3_mask->ipv6.saddr);
488 if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
489 req.l4_src_port = flow->l4_key.ports.sport;
490 req.l4_src_port_mask = flow->l4_mask.ports.sport;
491 req.l4_dst_port = flow->l4_key.ports.dport;
492 req.l4_dst_port_mask = flow->l4_mask.ports.dport;
493 } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
494 /* l4 ports serve as type/code when ip_proto is ICMP */
495 req.l4_src_port = htons(flow->l4_key.icmp.type);
496 req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
497 req.l4_dst_port = htons(flow->l4_key.icmp.code);
498 req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
500 req.flags = cpu_to_le16(flow_flags);
502 if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
503 action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
505 if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
506 action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
507 req.dst_fid = cpu_to_le16(actions->dst_fid);
509 if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
511 CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
512 req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
513 req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
514 memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
515 memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
517 if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
519 CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
520 /* A rewrite config with tpid == 0 implies VLAN pop */
521 req.l2_rewrite_vlan_tpid = 0;
522 memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
523 memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
526 req.action_flags = cpu_to_le16(action_flags);
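/* The response buffer (bp->hwrm_cmd_resp_addr) is shared by all HWRM
 * commands, so the returned flow_handle is copied out under
 * hwrm_cmd_lock, before another command can overwrite the buffer.
 */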
528 mutex_lock(&bp->hwrm_cmd_lock);
529 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
531 *flow_handle = resp->flow_handle;
532 mutex_unlock(&bp->hwrm_cmd_lock);
534 if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR)
541 static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
542 struct bnxt_tc_flow *flow,
543 struct bnxt_tc_l2_key *l2_info,
544 __le32 ref_decap_handle,
545 __le32 *decap_filter_handle)
547 struct hwrm_cfa_decap_filter_alloc_output *resp =
548 bp->hwrm_cmd_resp_addr;
549 struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
550 struct ip_tunnel_key *tun_key = &flow->tun_key;
554 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);
556 req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
557 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
558 CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
559 req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
560 req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;
562 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
563 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
564 /* tunnel_id is wrongly defined in the HSI definition as __le32 */
565 req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
568 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
569 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
570 ether_addr_copy(req.dst_macaddr, l2_info->dmac);
572 if (l2_info->num_vlans) {
573 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
574 req.t_ivlan_vid = l2_info->inner_vlan_tci;
577 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
578 req.ethertype = htons(ETH_P_IP);
580 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
581 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
582 CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
583 CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
584 req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
585 req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
586 req.src_ipaddr[0] = tun_key->u.ipv4.src;
589 if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
590 enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
591 req.dst_port = tun_key->tp_dst;
594 /* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
595 * is defined as __le32, l2_ctxt_ref_id is defined in the HSI as __le16.
597 req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
598 req.enables = cpu_to_le32(enables);
600 mutex_lock(&bp->hwrm_cmd_lock);
601 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
603 *decap_filter_handle = resp->decap_filter_id;
605 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
606 mutex_unlock(&bp->hwrm_cmd_lock);
613 static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
614 __le32 decap_filter_handle)
616 struct hwrm_cfa_decap_filter_free_input req = { 0 };
619 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
620 req.decap_filter_id = decap_filter_handle;
622 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
624 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
631 static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
632 struct ip_tunnel_key *encap_key,
633 struct bnxt_tc_l2_key *l2_info,
634 __le32 *encap_record_handle)
636 struct hwrm_cfa_encap_record_alloc_output *resp =
637 bp->hwrm_cmd_resp_addr;
638 struct hwrm_cfa_encap_record_alloc_input req = { 0 };
639 struct hwrm_cfa_encap_data_vxlan *encap =
640 (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
641 struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
642 (struct hwrm_vxlan_ipv4_hdr *)encap->l3;
645 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);
647 req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;
649 ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
650 ether_addr_copy(encap->src_mac_addr, l2_info->smac);
651 if (l2_info->num_vlans) {
652 encap->num_vlan_tags = l2_info->num_vlans;
653 encap->ovlan_tci = l2_info->inner_vlan_tci;
654 encap->ovlan_tpid = l2_info->inner_vlan_tpid;
657 encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
658 encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
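/* ver_hlen now encodes IPv4 version 4 with an IHL of 5 32-bit words,
 * i.e. the standard 20-byte header with no options.
 */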
659 encap_ipv4->ttl = encap_key->ttl;
661 encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
662 encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
663 encap_ipv4->protocol = IPPROTO_UDP;
665 encap->dst_port = encap_key->tp_dst;
666 encap->vni = tunnel_id_to_key32(encap_key->tun_id);
668 mutex_lock(&bp->hwrm_cmd_lock);
669 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
671 *encap_record_handle = resp->encap_record_id;
673 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
674 mutex_unlock(&bp->hwrm_cmd_lock);
681 static int hwrm_cfa_encap_record_free(struct bnxt *bp,
682 __le32 encap_record_handle)
684 struct hwrm_cfa_encap_record_free_input req = { 0 };
687 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
688 req.encap_record_id = encap_record_handle;
690 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
692 netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
699 static int bnxt_tc_put_l2_node(struct bnxt *bp,
700 struct bnxt_tc_flow_node *flow_node)
702 struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
703 struct bnxt_tc_info *tc_info = bp->tc_info;
706 /* remove flow_node from the L2 shared flow list */
707 list_del(&flow_node->l2_list_node);
708 if (--l2_node->refcount == 0) {
709 rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
710 tc_info->l2_ht_params);
713 "Error: %s: rhashtable_remove_fast: %d",
715 kfree_rcu(l2_node, rcu);
720 static struct bnxt_tc_l2_node *
721 bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
722 struct rhashtable_params ht_params,
723 struct bnxt_tc_l2_key *l2_key)
725 struct bnxt_tc_l2_node *l2_node;
728 l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
730 l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
736 l2_node->key = *l2_key;
737 rc = rhashtable_insert_fast(l2_table, &l2_node->node,
740 kfree_rcu(l2_node, rcu);
742 "Error: %s: rhashtable_insert_fast: %d",
746 INIT_LIST_HEAD(&l2_node->common_l2_flows);
751 /* Get the ref_flow_handle for a flow by checking if there are any other
752 * flows that share the same L2 key as this flow.
755 bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
756 struct bnxt_tc_flow_node *flow_node,
757 __le16 *ref_flow_handle)
759 struct bnxt_tc_info *tc_info = bp->tc_info;
760 struct bnxt_tc_flow_node *ref_flow_node;
761 struct bnxt_tc_l2_node *l2_node;
763 l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
764 tc_info->l2_ht_params,
769 /* If any other flow is using this l2_node, use its flow_handle
770 * as the ref_flow_handle
772 if (l2_node->refcount > 0) {
773 ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
774 struct bnxt_tc_flow_node,
776 *ref_flow_handle = ref_flow_node->flow_handle;
778 *ref_flow_handle = cpu_to_le16(0xffff);
781 /* Insert the l2_node into the flow_node so that subsequent flows
782 * with a matching l2 key can use the flow_handle of this flow
783 * as their ref_flow_handle
785 flow_node->l2_node = l2_node;
786 list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
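/* Example of the sharing scheme: if flow A with L2 key K is offloaded
 * first, it gets ref_flow_handle 0xffff; a later flow B with the same
 * key K then gets A's flow_handle as its ref_flow_handle, letting the
 * FW share the L2 match resources between the two flows.
 */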
791 /* After the flow parsing is done, this routine is used for checking
792 * if there are any aspects of the flow that prevent it from being
793 * offloaded.
794 */
795 static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
797 /* If L4 ports are specified then ip_proto must be TCP or UDP */
798 if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
799 (flow->l4_key.ip_proto != IPPROTO_TCP &&
800 flow->l4_key.ip_proto != IPPROTO_UDP)) {
801 netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
802 flow->l4_key.ip_proto);
806 /* Currently source/dest MAC cannot be partial wildcard */
807 if (bits_set(&flow->l2_key.smac, sizeof(flow->l2_key.smac)) &&
808 !is_exactmatch(flow->l2_mask.smac, sizeof(flow->l2_mask.smac))) {
809 netdev_info(bp->dev, "Wildcard match unsupported for Source MAC\n");
812 if (bits_set(&flow->l2_key.dmac, sizeof(flow->l2_key.dmac)) &&
813 !is_exactmatch(&flow->l2_mask.dmac, sizeof(flow->l2_mask.dmac))) {
814 netdev_info(bp->dev, "Wildcard match unsupported for Dest MAC\n");
818 /* Currently VLAN fields cannot be partial wildcard */
819 if (bits_set(&flow->l2_key.inner_vlan_tci,
820 sizeof(flow->l2_key.inner_vlan_tci)) &&
821 !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
822 flow->l2_key.inner_vlan_tci)) {
823 netdev_info(bp->dev, "Unsupported VLAN TCI\n");
826 if (bits_set(&flow->l2_key.inner_vlan_tpid,
827 sizeof(flow->l2_key.inner_vlan_tpid)) &&
828 !is_exactmatch(&flow->l2_mask.inner_vlan_tpid,
829 sizeof(flow->l2_mask.inner_vlan_tpid))) {
830 netdev_info(bp->dev, "Wildcard match unsupported for VLAN TPID\n");
834 /* Currently Ethertype must be set */
835 if (!is_exactmatch(&flow->l2_mask.ether_type,
836 sizeof(flow->l2_mask.ether_type))) {
837 netdev_info(bp->dev, "Wildcard match unsupported for Ethertype\n");
844 /* Returns the final refcount of the node on success
845 * or a -ve error code on failure
847 static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
848 struct rhashtable *tunnel_table,
849 struct rhashtable_params *ht_params,
850 struct bnxt_tc_tunnel_node *tunnel_node)
854 if (--tunnel_node->refcount == 0) {
855 rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
858 netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
861 kfree_rcu(tunnel_node, rcu);
864 return tunnel_node->refcount;
868 /* Get (or add) either encap or decap tunnel node from/to the supplied
869 * hash table.
870 */
871 static struct bnxt_tc_tunnel_node *
872 bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
873 struct rhashtable_params *ht_params,
874 struct ip_tunnel_key *tun_key)
876 struct bnxt_tc_tunnel_node *tunnel_node;
879 tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
881 tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
887 tunnel_node->key = *tun_key;
888 tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
889 rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
892 kfree_rcu(tunnel_node, rcu);
896 tunnel_node->refcount++;
899 netdev_info(bp->dev, "error rc=%d", rc);
903 static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
904 struct bnxt_tc_flow *flow,
905 struct bnxt_tc_l2_key *l2_key,
906 struct bnxt_tc_flow_node *flow_node,
907 __le32 *ref_decap_handle)
909 struct bnxt_tc_info *tc_info = bp->tc_info;
910 struct bnxt_tc_flow_node *ref_flow_node;
911 struct bnxt_tc_l2_node *decap_l2_node;
913 decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
914 tc_info->decap_l2_ht_params,
919 /* If any other flow is using this decap_l2_node, use its decap_handle
920 * as the ref_decap_handle
922 if (decap_l2_node->refcount > 0) {
924 list_first_entry(&decap_l2_node->common_l2_flows,
925 struct bnxt_tc_flow_node,
927 *ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
929 *ref_decap_handle = INVALID_TUNNEL_HANDLE;
932 /* Insert the l2_node into the flow_node so that subsequent flows
933 * with a matching decap l2 key can use the decap_filter_handle of
934 * this flow as their ref_decap_handle
936 flow_node->decap_l2_node = decap_l2_node;
937 list_add(&flow_node->decap_l2_list_node,
938 &decap_l2_node->common_l2_flows);
939 decap_l2_node->refcount++;
943 static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
944 struct bnxt_tc_flow_node *flow_node)
946 struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
947 struct bnxt_tc_info *tc_info = bp->tc_info;
950 /* remove flow_node from the decap L2 sharing flow list */
951 list_del(&flow_node->decap_l2_list_node);
952 if (--decap_l2_node->refcount == 0) {
953 rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
954 &decap_l2_node->node,
955 tc_info->decap_l2_ht_params);
957 netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
958 kfree_rcu(decap_l2_node, rcu);
962 static void bnxt_tc_put_decap_handle(struct bnxt *bp,
963 struct bnxt_tc_flow_node *flow_node)
965 __le32 decap_handle = flow_node->decap_node->tunnel_handle;
966 struct bnxt_tc_info *tc_info = bp->tc_info;
969 if (flow_node->decap_l2_node)
970 bnxt_tc_put_decap_l2_node(bp, flow_node);
972 rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
973 &tc_info->decap_ht_params,
974 flow_node->decap_node);
975 if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
976 hwrm_cfa_decap_filter_free(bp, decap_handle);
979 static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
980 struct ip_tunnel_key *tun_key,
981 struct bnxt_tc_l2_key *l2_info)
984 struct net_device *real_dst_dev = bp->dev;
985 struct flowi4 flow = { {0} };
986 struct net_device *dst_dev;
987 struct neighbour *nbr;
991 flow.flowi4_proto = IPPROTO_UDP;
992 flow.fl4_dport = tun_key->tp_dst;
993 flow.daddr = tun_key->u.ipv4.dst;
995 rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
997 netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
1001 /* The route must either point to the real_dst_dev or a dst_dev that
1002 * uses the real_dst_dev.
1004 dst_dev = rt->dst.dev;
1005 if (is_vlan_dev(dst_dev)) {
1006 #if IS_ENABLED(CONFIG_VLAN_8021Q)
1007 struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);
1009 if (vlan->real_dev != real_dst_dev) {
1010 netdev_info(bp->dev,
1011 "dst_dev(%s) doesn't use PF-if(%s)",
1012 netdev_name(dst_dev),
1013 netdev_name(real_dst_dev));
1017 l2_info->inner_vlan_tci = htons(vlan->vlan_id);
1018 l2_info->inner_vlan_tpid = vlan->vlan_proto;
1019 l2_info->num_vlans = 1;
1021 } else if (dst_dev != real_dst_dev) {
1022 netdev_info(bp->dev,
1023 "dst_dev(%s) for %pI4b is not PF-if(%s)",
1024 netdev_name(dst_dev), &flow.daddr,
1025 netdev_name(real_dst_dev));
1030 nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
1032 netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
1038 tun_key->u.ipv4.src = flow.saddr;
1039 tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
1040 neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
1041 ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
1054 static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1055 struct bnxt_tc_flow_node *flow_node,
1056 __le32 *decap_filter_handle)
1058 struct ip_tunnel_key *decap_key = &flow->tun_key;
1059 struct bnxt_tc_info *tc_info = bp->tc_info;
1060 struct bnxt_tc_l2_key l2_info = { {0} };
1061 struct bnxt_tc_tunnel_node *decap_node;
1062 struct ip_tunnel_key tun_key = { 0 };
1063 struct bnxt_tc_l2_key *decap_l2_info;
1064 __le32 ref_decap_handle;
1067 /* Check if there's another flow using the same tunnel decap.
1068 * If not, add this tunnel to the table and resolve the other
1069 * tunnel header fields. Ignore src_port in the tunnel_key,
1070 * since it is not required for decap filters.
1072 decap_key->tp_src = 0;
1073 decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
1074 &tc_info->decap_ht_params,
1079 flow_node->decap_node = decap_node;
1081 if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
1084 /* Resolve the L2 fields for tunnel decap
1085 * Resolve the route for remote vtep (saddr) of the decap key
1086 * Find its next-hop MAC address
1088 tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
1089 tun_key.tp_dst = flow->tun_key.tp_dst;
1090 rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
1094 decap_l2_info = &decap_node->l2_info;
1095 /* decap smac is wildcarded */
1096 ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
1097 if (l2_info.num_vlans) {
1098 decap_l2_info->num_vlans = l2_info.num_vlans;
1099 decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
1100 decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
1102 flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;
1104 /* For getting a decap_filter_handle we first need to check if
1105 * there are any other decap flows that share the same tunnel L2
1106 * key and if so, pass that flow's decap_filter_handle as the
1107 * ref_decap_handle for this flow.
1109 rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
1114 /* Issue the hwrm cmd to allocate a decap filter handle */
1115 rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
1117 &decap_node->tunnel_handle);
1122 *decap_filter_handle = decap_node->tunnel_handle;
1126 bnxt_tc_put_decap_l2_node(bp, flow_node);
1128 bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
1129 &tc_info->decap_ht_params,
1130 flow_node->decap_node);
1134 static void bnxt_tc_put_encap_handle(struct bnxt *bp,
1135 struct bnxt_tc_tunnel_node *encap_node)
1137 __le32 encap_handle = encap_node->tunnel_handle;
1138 struct bnxt_tc_info *tc_info = bp->tc_info;
1141 rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
1142 &tc_info->encap_ht_params, encap_node);
1143 if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
1144 hwrm_cfa_encap_record_free(bp, encap_handle);
1147 /* Lookup the tunnel encap table and check if there's an encap_handle
1148 * alloc'd already.
1149 * If not, query L2 info via a route lookup and issue an encap_record_alloc
1152 static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
1153 struct bnxt_tc_flow_node *flow_node,
1154 __le32 *encap_handle)
1156 struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
1157 struct bnxt_tc_info *tc_info = bp->tc_info;
1158 struct bnxt_tc_tunnel_node *encap_node;
1161 /* Check if there's another flow using the same tunnel encap.
1162 * If not, add this tunnel to the table and resolve the other
1163 * tunnel header fields
1165 encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
1166 &tc_info->encap_ht_params,
1171 flow_node->encap_node = encap_node;
1173 if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
1176 rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
1180 /* Allocate a new tunnel encap record */
1181 rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
1182 &encap_node->tunnel_handle);
1187 *encap_handle = encap_node->tunnel_handle;
1191 bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
1192 &tc_info->encap_ht_params, encap_node);
1196 static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
1197 struct bnxt_tc_flow *flow,
1198 struct bnxt_tc_flow_node *flow_node)
1200 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1201 bnxt_tc_put_decap_handle(bp, flow_node);
1202 else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
1203 bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
1206 static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
1207 struct bnxt_tc_flow *flow,
1208 struct bnxt_tc_flow_node *flow_node,
1209 __le32 *tunnel_handle)
1211 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1212 return bnxt_tc_get_decap_handle(bp, flow, flow_node,
1214 else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
1215 return bnxt_tc_get_encap_handle(bp, flow, flow_node,
1220 static int __bnxt_tc_del_flow(struct bnxt *bp,
1221 struct bnxt_tc_flow_node *flow_node)
1223 struct bnxt_tc_info *tc_info = bp->tc_info;
1226 /* send HWRM cmd to free the flow-id */
1227 bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
1229 mutex_lock(&tc_info->lock);
1231 /* release references to any tunnel encap/decap nodes */
1232 bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);
1234 /* release reference to l2 node */
1235 bnxt_tc_put_l2_node(bp, flow_node);
1237 mutex_unlock(&tc_info->lock);
1239 rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
1240 tc_info->flow_ht_params);
1242 netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
1245 kfree_rcu(flow_node, rcu);
1249 static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
1252 if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
1253 flow->src_fid = bp->pf.fw_fid;
1255 flow->src_fid = src_fid;
1258 /* Add a new flow or replace an existing flow.
1260 * There are essentially two critical sections here.
1261 * 1. while adding a new flow
1263 * b) issue HWRM cmd and get flow_handle
1264 * c) link l2-key with flow
1265 * 2. while deleting a flow
1266 * a) unlinking l2-key from flow
1267 * A lock is needed to protect these two critical sections.
1269 * The hash-tables are already protected by the rhashtable API.
1271 static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
1272 struct tc_cls_flower_offload *tc_flow_cmd)
1274 struct bnxt_tc_flow_node *new_node, *old_node;
1275 struct bnxt_tc_info *tc_info = bp->tc_info;
1276 struct bnxt_tc_flow *flow;
1277 __le32 tunnel_handle = 0;
1278 __le16 ref_flow_handle;
1281 /* allocate memory for the new flow and its node */
1282 new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
1287 new_node->cookie = tc_flow_cmd->cookie;
1288 flow = &new_node->flow;
1290 rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
1294 bnxt_tc_set_src_fid(bp, flow, src_fid);
1296 if (!bnxt_tc_can_offload(bp, flow)) {
1301 /* If a flow exists with the same cookie, delete it */
1302 old_node = rhashtable_lookup_fast(&tc_info->flow_table,
1303 &tc_flow_cmd->cookie,
1304 tc_info->flow_ht_params);
1306 __bnxt_tc_del_flow(bp, old_node);
1308 /* Check if the L2 part of the flow has been offloaded already.
1309 * If so, bump up its refcnt and get its reference handle.
1311 mutex_lock(&tc_info->lock);
1312 rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
1316 /* If the flow involves tunnel encap/decap, get tunnel_handle */
1317 rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
1321 /* send HWRM cmd to alloc the flow */
1322 rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
1323 tunnel_handle, &new_node->flow_handle);
1327 flow->lastused = jiffies;
1328 spin_lock_init(&flow->stats_lock);
1329 /* add new flow to flow-table */
1330 rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
1331 tc_info->flow_ht_params);
1333 goto hwrm_flow_free;
1335 mutex_unlock(&tc_info->lock);
1339 bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
1341 bnxt_tc_put_tunnel_handle(bp, flow, new_node);
1343 bnxt_tc_put_l2_node(bp, new_node);
1345 mutex_unlock(&tc_info->lock);
1347 kfree_rcu(new_node, rcu);
1349 netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
1350 __func__, tc_flow_cmd->cookie, rc);
1354 static int bnxt_tc_del_flow(struct bnxt *bp,
1355 struct tc_cls_flower_offload *tc_flow_cmd)
1357 struct bnxt_tc_info *tc_info = bp->tc_info;
1358 struct bnxt_tc_flow_node *flow_node;
1360 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1361 &tc_flow_cmd->cookie,
1362 tc_info->flow_ht_params);
1366 return __bnxt_tc_del_flow(bp, flow_node);
1369 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
1370 struct tc_cls_flower_offload *tc_flow_cmd)
1372 struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
1373 struct bnxt_tc_info *tc_info = bp->tc_info;
1374 struct bnxt_tc_flow_node *flow_node;
1375 struct bnxt_tc_flow *flow;
1376 unsigned long lastused;
1378 flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
1379 &tc_flow_cmd->cookie,
1380 tc_info->flow_ht_params);
1384 flow = &flow_node->flow;
1385 curr_stats = &flow->stats;
1386 prev_stats = &flow->prev_stats;
1388 spin_lock(&flow->stats_lock);
1389 stats.packets = curr_stats->packets - prev_stats->packets;
1390 stats.bytes = curr_stats->bytes - prev_stats->bytes;
1391 *prev_stats = *curr_stats;
1392 lastused = flow->lastused;
1393 spin_unlock(&flow->stats_lock);
1395 tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
1401 bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
1402 struct bnxt_tc_stats_batch stats_batch[])
1404 struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
1405 struct hwrm_cfa_flow_stats_input req = { 0 };
1406 __le16 *req_flow_handles = &req.flow_handle_0;
1409 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
1410 req.num_flows = cpu_to_le16(num_flows);
1411 for (i = 0; i < num_flows; i++) {
1412 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1414 req_flow_handles[i] = flow_node->flow_handle;
1417 mutex_lock(&bp->hwrm_cmd_lock);
1418 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
1420 __le64 *resp_packets = &resp->packet_0;
1421 __le64 *resp_bytes = &resp->byte_0;
1423 for (i = 0; i < num_flows; i++) {
1424 stats_batch[i].hw_stats.packets =
1425 le64_to_cpu(resp_packets[i]);
1426 stats_batch[i].hw_stats.bytes =
1427 le64_to_cpu(resp_bytes[i]);
1430 netdev_info(bp->dev, "error rc=%d", rc);
1432 mutex_unlock(&bp->hwrm_cmd_lock);
1439 /* Add val to accum while handling a possible wraparound
1440 * of val. Even though val is of type u64, its actual width
1441 * is denoted by mask and it wraps around beyond that width.
1443 static void accumulate_val(u64 *accum, u64 val, u64 mask)
1445 #define low_bits(x, mask) ((x) & (mask))
1446 #define high_bits(x, mask) ((x) & ~(mask))
1447 bool wrapped = val < low_bits(*accum, mask);
1449 *accum = high_bits(*accum, mask) + val;
1451 *accum += (mask + 1);
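/* Worked example with a 28-bit counter (mask == 0x0fffffff): if *accum
 * is 0x0ffffff0 and the new hw val is 0x5, then val < low_bits(*accum),
 * so the counter wrapped; *accum becomes 0x5 + 0x10000000 = 0x10000005.
 */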
1454 /* The HW counters' width is much less than 64bits.
1455 * Handle possible wrap-around while updating the stat counters
1457 static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
1458 struct bnxt_tc_flow_stats *acc_stats,
1459 struct bnxt_tc_flow_stats *hw_stats)
1461 accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
1462 accumulate_val(&acc_stats->packets, hw_stats->packets,
1463 tc_info->packets_mask);
1467 bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
1468 struct bnxt_tc_stats_batch stats_batch[])
1470 struct bnxt_tc_info *tc_info = bp->tc_info;
1473 rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
1477 for (i = 0; i < num_flows; i++) {
1478 struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
1479 struct bnxt_tc_flow *flow = &flow_node->flow;
1481 spin_lock(&flow->stats_lock);
1482 bnxt_flow_stats_accum(tc_info, &flow->stats,
1483 &stats_batch[i].hw_stats);
1484 if (flow->stats.packets != flow->prev_stats.packets)
1485 flow->lastused = jiffies;
1486 spin_unlock(&flow->stats_lock);
1493 bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
1494 struct bnxt_tc_stats_batch stats_batch[],
1497 struct bnxt_tc_info *tc_info = bp->tc_info;
1498 struct rhashtable_iter *iter = &tc_info->iter;
1502 rhashtable_walk_start(iter);
1505 for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
1506 flow_node = rhashtable_walk_next(iter);
1507 if (IS_ERR(flow_node)) {
1509 if (PTR_ERR(flow_node) == -EAGAIN) {
1512 rc = PTR_ERR(flow_node);
1521 stats_batch[i].flow_node = flow_node;
1524 rhashtable_walk_stop(iter);
1529 void bnxt_tc_flow_stats_work(struct bnxt *bp)
1531 struct bnxt_tc_info *tc_info = bp->tc_info;
1534 num_flows = atomic_read(&tc_info->flow_table.nelems);
1538 rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);
1541 rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
1552 bnxt_tc_flow_stats_batch_update(bp, num_flows,
1553 tc_info->stats_batch);
1556 rhashtable_walk_exit(&tc_info->iter);
1559 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
1560 struct tc_cls_flower_offload *cls_flower)
1562 switch (cls_flower->command) {
1563 case TC_CLSFLOWER_REPLACE:
1564 return bnxt_tc_add_flow(bp, src_fid, cls_flower);
1565 case TC_CLSFLOWER_DESTROY:
1566 return bnxt_tc_del_flow(bp, cls_flower);
1567 case TC_CLSFLOWER_STATS:
1568 return bnxt_tc_get_flow_stats(bp, cls_flower);
1574 static const struct rhashtable_params bnxt_tc_flow_ht_params = {
1575 .head_offset = offsetof(struct bnxt_tc_flow_node, node),
1576 .key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
1577 .key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
1578 .automatic_shrinking = true
1581 static const struct rhashtable_params bnxt_tc_l2_ht_params = {
1582 .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1583 .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1584 .key_len = BNXT_TC_L2_KEY_LEN,
1585 .automatic_shrinking = true
1588 static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
1589 .head_offset = offsetof(struct bnxt_tc_l2_node, node),
1590 .key_offset = offsetof(struct bnxt_tc_l2_node, key),
1591 .key_len = BNXT_TC_L2_KEY_LEN,
1592 .automatic_shrinking = true
1595 static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
1596 .head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
1597 .key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
1598 .key_len = sizeof(struct ip_tunnel_key),
1599 .automatic_shrinking = true
1602 /* convert counter width in bits to a mask */
1603 #define mask(width) ((u64)~0 >> (64 - (width)))
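/* e.g. mask(28) == 0x0fffffff and mask(36) == 0xfffffffff */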
1605 int bnxt_init_tc(struct bnxt *bp)
1607 struct bnxt_tc_info *tc_info;
1610 if (bp->hwrm_spec_code < 0x10803) {
1611 netdev_warn(bp->dev,
1612 "Firmware does not support TC flower offload.\n");
1616 tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
1619 mutex_init(&tc_info->lock);
1621 /* Counter widths are programmed by FW */
1622 tc_info->bytes_mask = mask(36);
1623 tc_info->packets_mask = mask(28);
1625 tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
1626 rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
1630 tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
1631 rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
1633 goto destroy_flow_table;
1635 tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
1636 rc = rhashtable_init(&tc_info->decap_l2_table,
1637 &tc_info->decap_l2_ht_params);
1639 goto destroy_l2_table;
1641 tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
1642 rc = rhashtable_init(&tc_info->decap_table,
1643 &tc_info->decap_ht_params);
1645 goto destroy_decap_l2_table;
1647 tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
1648 rc = rhashtable_init(&tc_info->encap_table,
1649 &tc_info->encap_ht_params);
1651 goto destroy_decap_table;
1653 tc_info->enabled = true;
1654 bp->dev->hw_features |= NETIF_F_HW_TC;
1655 bp->dev->features |= NETIF_F_HW_TC;
1656 bp->tc_info = tc_info;
1659 destroy_decap_table:
1660 rhashtable_destroy(&tc_info->decap_table);
1661 destroy_decap_l2_table:
1662 rhashtable_destroy(&tc_info->decap_l2_table);
1664 rhashtable_destroy(&tc_info->l2_table);
1666 rhashtable_destroy(&tc_info->flow_table);
1672 void bnxt_shutdown_tc(struct bnxt *bp)
1674 struct bnxt_tc_info *tc_info = bp->tc_info;
1676 if (!bnxt_tc_flower_enabled(bp))
1679 rhashtable_destroy(&tc_info->flow_table);
1680 rhashtable_destroy(&tc_info->l2_table);
1681 rhashtable_destroy(&tc_info->decap_l2_table);
1682 rhashtable_destroy(&tc_info->decap_table);
1683 rhashtable_destroy(&tc_info->encap_table);