/*
 * This file is part of the Chelsio T4/T5/T6 Ethernet driver for Linux.
 *
 * Copyright (c) 2017 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_vlan.h>

#include "cxgb4.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"

#define STATS_CHECK_PERIOD (HZ / 2)

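/* Map each supported TC pedit field to the byte offset and width of
 * the corresponding rewrite field inside struct
 * ch_filter_specification: dmac/smac for L2 rewrites, nat_fip/nat_lip
 * for L3 NAT and nat_fport/nat_lport for L4 NAT.
 */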
static struct ch_tc_pedit_fields pedits[] = {
        PEDIT_FIELDS(ETH_, DMAC_31_0, 4, dmac, 0),
        PEDIT_FIELDS(ETH_, DMAC_47_32, 2, dmac, 4),
        PEDIT_FIELDS(ETH_, SMAC_15_0, 2, smac, 0),
        PEDIT_FIELDS(ETH_, SMAC_47_16, 4, smac, 2),
        PEDIT_FIELDS(IP4_, SRC, 4, nat_fip, 0),
        PEDIT_FIELDS(IP4_, DST, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, SRC_31_0, 4, nat_fip, 0),
        PEDIT_FIELDS(IP6_, SRC_63_32, 4, nat_fip, 4),
        PEDIT_FIELDS(IP6_, SRC_95_64, 4, nat_fip, 8),
        PEDIT_FIELDS(IP6_, SRC_127_96, 4, nat_fip, 12),
        PEDIT_FIELDS(IP6_, DST_31_0, 4, nat_lip, 0),
        PEDIT_FIELDS(IP6_, DST_63_32, 4, nat_lip, 4),
        PEDIT_FIELDS(IP6_, DST_95_64, 4, nat_lip, 8),
        PEDIT_FIELDS(IP6_, DST_127_96, 4, nat_lip, 12),
        PEDIT_FIELDS(TCP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(TCP_, DPORT, 2, nat_lport, 0),
        PEDIT_FIELDS(UDP_, SPORT, 2, nat_fport, 0),
        PEDIT_FIELDS(UDP_, DPORT, 2, nat_lport, 0),
};

static struct ch_tc_flower_entry *allocate_flower_entry(void)
{
        struct ch_tc_flower_entry *new = kzalloc(sizeof(*new), GFP_KERNEL);

        if (new)
                spin_lock_init(&new->lock);
        return new;
}

/* Must be called with either RTNL or rcu_read_lock */
static struct ch_tc_flower_entry *ch_flower_lookup(struct adapter *adap,
                                                   unsigned long flower_cookie)
{
        return rhashtable_lookup_fast(&adap->flower_tbl, &flower_cookie,
                                      adap->flower_ht_params);
}

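/* Translate the matched keys of a TC flower rule (as dissected by the
 * flow_rule API) into a Chelsio hardware filter specification. For
 * example, an (illustrative) rule such as
 *
 *   tc filter add dev ethX ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 *
 * ends up setting fs->val.proto, fs->val.lport and the corresponding
 * masks below ("ethX" is a placeholder). Note the naming convention:
 * TC's "dst" maps to the filter's local (lip/lport) fields and "src"
 * to the foreign (fip/fport) fields.
 */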
static void cxgb4_process_flow_match(struct net_device *dev,
                                     struct flow_cls_offload *cls,
                                     struct ch_filter_specification *fs)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        u16 addr_type = 0;

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;
                u16 ethtype_key, ethtype_mask;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);

                if (ethtype_key == ETH_P_ALL) {
                        ethtype_key = 0;
                        ethtype_mask = 0;
                }

                if (ethtype_key == ETH_P_IPV6)
                        fs->type = 1;

                fs->val.ethtype = ethtype_key;
                fs->mask.ethtype = ethtype_mask;
                fs->val.proto = match.key->ip_proto;
                fs->mask.proto = match.mask->ip_proto;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                fs->type = 0;
                memcpy(&fs->val.lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], &match.key->src, sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], &match.mask->dst, sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], &match.mask->src, sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], &match.key->dst, sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], &match.key->src, sizeof(match.key->src));
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                fs->type = 1;
                memcpy(&fs->val.lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->val.fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
                memcpy(&fs->mask.lip[0], match.mask->dst.s6_addr,
                       sizeof(match.mask->dst));
                memcpy(&fs->mask.fip[0], match.mask->src.s6_addr,
                       sizeof(match.mask->src));

                /* also initialize nat_lip/fip to same values */
                memcpy(&fs->nat_lip[0], match.key->dst.s6_addr,
                       sizeof(match.key->dst));
                memcpy(&fs->nat_fip[0], match.key->src.s6_addr,
                       sizeof(match.key->src));
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                /* The filter spec expects L4 ports in host-endian,
                 * while the dissected ports arrive in network-endian.
                 */
                fs->val.lport = be16_to_cpu(match.key->dst);
                fs->mask.lport = be16_to_cpu(match.mask->dst);
                fs->val.fport = be16_to_cpu(match.key->src);
                fs->mask.fport = be16_to_cpu(match.mask->src);

                /* also initialize nat_lport/fport to same values */
                fs->nat_lport = fs->val.lport;
                fs->nat_fport = fs->val.fport;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                fs->val.tos = match.key->tos;
                fs->mask.tos = match.mask->tos;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_match_enc_keyid match;

                flow_rule_match_enc_keyid(rule, &match);
                fs->val.vni = be32_to_cpu(match.key->keyid);
                fs->mask.vni = be32_to_cpu(match.mask->keyid);
                if (fs->mask.vni) {
                        fs->val.encap_vld = 1;
                        fs->mask.encap_vld = 1;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;
                u16 vlan_tci, vlan_tci_mask;

                flow_rule_match_vlan(rule, &match);
                vlan_tci = match.key->vlan_id | (match.key->vlan_priority <<
                                                 VLAN_PRIO_SHIFT);
                vlan_tci_mask = match.mask->vlan_id | (match.mask->vlan_priority <<
                                                       VLAN_PRIO_SHIFT);
                fs->val.ivlan = vlan_tci;
                fs->mask.ivlan = vlan_tci_mask;

                fs->val.ivlan_vld = 1;
                fs->mask.ivlan_vld = 1;

                /* Chelsio adapters use the ivlan_vld bit to match VLAN
                 * packets as 802.1Q. When a VLAN tag is present, the
                 * ethtype field matches the ethtype of the inner header,
                 * i.e. the header following the VLAN header. So if TC
                 * supplied ethtype 802.1Q, ivlan_vld (set above) already
                 * encodes that match and the ethtype value must be
                 * reset; otherwise the hardware would try to match
                 * 802.1Q against the inner header's ethtype.
                 */
                if (fs->val.ethtype == ETH_P_8021Q) {
                        fs->val.ethtype = 0;
                        fs->mask.ethtype = 0;
                }
        }

        /* Match only packets coming from the ingress port where this
         * filter will be created.
         */
        fs->val.iport = netdev2pinfo(dev)->port_id;
        fs->mask.iport = ~0;
}

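/* Reject matches the hardware cannot express. Any dissector key
 * outside the whitelist below fails early with -EOPNOTSUPP rather
 * than being silently ignored.
 */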
static int cxgb4_validate_flow_match(struct net_device *dev,
                                     struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_dissector *dissector = rule->match.dissector;
        u16 ethtype_mask = 0;
        u16 ethtype_key = 0;

        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IP))) {
                netdev_warn(dev, "Unsupported key used: 0x%x\n",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ethtype_key = ntohs(match.key->n_proto);
                ethtype_mask = ntohs(match.mask->n_proto);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                u16 eth_ip_type = ethtype_key & ethtype_mask;
                struct flow_match_ip match;

                if (eth_ip_type != ETH_P_IP && eth_ip_type != ETH_P_IPV6) {
                        netdev_err(dev, "IP Key supported only with IPv4/v6\n");
                        return -EINVAL;
                }

                flow_rule_match_ip(rule, &match);
                if (match.mask->ttl) {
                        netdev_warn(dev, "ttl match unsupported for offload\n");
                        return -EOPNOTSUPP;
                }
        }

        return 0;
}

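/* Apply one 32-bit pedit word to the filter specification. TC pedit
 * masks follow "mask bit set => keep the original bit", so the bits
 * actually being written are val & ~mask. Illustrative example: a
 * "pedit ex munge ip dst set 198.51.100.1" action arrives as a single
 * 4-byte word with mask 0x00000000, and lands in fs->nat_lip via the
 * pedits[] table above.
 */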
static void offload_pedit(struct ch_filter_specification *fs, u32 val, u32 mask,
                          u8 field)
{
        u32 set_val = val & ~mask;
        u32 offset = 0;
        u8 size = 1;
        int i;

        for (i = 0; i < ARRAY_SIZE(pedits); i++) {
                if (pedits[i].field == field) {
                        offset = pedits[i].offset;
                        size = pedits[i].size;
                        break;
                }
        }
        memcpy((u8 *)fs + offset, &set_val, size);
}

static void process_pedit_field(struct ch_filter_specification *fs, u32 val,
                                u32 mask, u32 offset, u8 htype)
{
        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                        fs->newdmac = 1;
                        offload_pedit(fs, val, mask, ETH_DMAC_31_0);
                        break;
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                        if (~mask & PEDIT_ETH_DMAC_MASK)
                                offload_pedit(fs, val, mask, ETH_DMAC_47_32);
                        else
                                offload_pedit(fs, val >> 16, mask >> 16,
                                              ETH_SMAC_15_0);
                        break;
                case PEDIT_ETH_SMAC_47_16:
                        fs->newsmac = 1;
                        offload_pedit(fs, val, mask, ETH_SMAC_47_16);
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                        offload_pedit(fs, val, mask, IP4_SRC);
                        break;
                case PEDIT_IP4_DST:
                        offload_pedit(fs, val, mask, IP4_DST);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                        offload_pedit(fs, val, mask, IP6_SRC_31_0);
                        break;
                case PEDIT_IP6_SRC_63_32:
                        offload_pedit(fs, val, mask, IP6_SRC_63_32);
                        break;
                case PEDIT_IP6_SRC_95_64:
                        offload_pedit(fs, val, mask, IP6_SRC_95_64);
                        break;
                case PEDIT_IP6_SRC_127_96:
                        offload_pedit(fs, val, mask, IP6_SRC_127_96);
                        break;
                case PEDIT_IP6_DST_31_0:
                        offload_pedit(fs, val, mask, IP6_DST_31_0);
                        break;
                case PEDIT_IP6_DST_63_32:
                        offload_pedit(fs, val, mask, IP6_DST_63_32);
                        break;
                case PEDIT_IP6_DST_95_64:
                        offload_pedit(fs, val, mask, IP6_DST_95_64);
                        break;
                case PEDIT_IP6_DST_127_96:
                        offload_pedit(fs, val, mask, IP6_DST_127_96);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                offload_pedit(fs, cpu_to_be32(val) >> 16,
                                              cpu_to_be32(mask) >> 16,
                                              TCP_SPORT);
                        else
                                offload_pedit(fs, cpu_to_be32(val),
                                              cpu_to_be32(mask), TCP_DPORT);
                }
                fs->nat_mode = NAT_MODE_ALL;
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (~mask & PEDIT_TCP_UDP_SPORT_MASK)
                                offload_pedit(fs, cpu_to_be32(val) >> 16,
                                              cpu_to_be32(mask) >> 16,
                                              UDP_SPORT);
                        else
                                offload_pedit(fs, cpu_to_be32(val),
                                              cpu_to_be32(mask), UDP_DPORT);
                }
                fs->nat_mode = NAT_MODE_ALL;
        }
}

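/* Translate the TC action list into the filter specification:
 * accept/drop select the filter action, a redirect switches the packet
 * out of the chosen egress port, vlan ops program VLAN rewrite, and
 * pedit mangles are folded into the NAT/MAC rewrite fields via
 * process_pedit_field(). The action list is assumed to have passed
 * cxgb4_validate_flow_actions() first.
 */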
static void cxgb4_process_flow_actions(struct net_device *in,
                                       struct flow_cls_offload *cls,
                                       struct ch_filter_specification *fs)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_action_entry *act;
        int i;

        flow_action_for_each(i, act, &rule->action) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                        fs->action = FILTER_PASS;
                        break;
                case FLOW_ACTION_DROP:
                        fs->action = FILTER_DROP;
                        break;
                case FLOW_ACTION_REDIRECT: {
                        struct net_device *out = act->dev;
                        struct port_info *pi = netdev_priv(out);

                        fs->action = FILTER_SWITCH;
                        fs->eport = pi->port_id;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u8 prio = act->vlan.prio;
                        u16 vid = act->vlan.vid;
                        u16 vlan_tci = (prio << VLAN_PRIO_SHIFT) | vid;

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                fs->newvlan |= VLAN_REMOVE;
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                                fs->newvlan |= VLAN_INSERT;
                                fs->vlan = vlan_tci;
                                break;
                        case FLOW_ACTION_VLAN_MANGLE:
                                fs->newvlan |= VLAN_REWRITE;
                                fs->vlan = vlan_tci;
                                break;
                        default:
                                break;
                        }
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        u32 mask, val, offset;
                        u8 htype;

                        htype = act->mangle.htype;
                        mask = act->mangle.mask;
                        val = act->mangle.val;
                        offset = act->mangle.offset;

                        process_pedit_field(fs, val, mask, offset, htype);
                        }
                        break;
                default:
                        break;
                }
        }
}

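/* A single pedit word spans both L4 ports, but the hardware rewrites
 * one port per entry. Worked example (under the SPORT-high/DPORT-low
 * layout documented below): rewriting only the destination port
 * arrives with mask 0xFFFF0000, so the written bits ~mask =
 * 0x0000FFFF touch only the lower half and the mask is accepted;
 * a mask touching both halves at once is rejected.
 */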
static bool valid_l4_mask(u32 mask)
{
        u16 hi, lo;

        /* Either the upper 16-bits (SPORT) OR the lower
         * 16-bits (DPORT) can be set, but NOT BOTH.
         */
        hi = (mask >> 16) & 0xFFFF;
        lo = mask & 0xFFFF;

        return !(hi && lo);
}

static bool valid_pedit_action(struct net_device *dev,
                               const struct flow_action_entry *act)
{
        u32 mask, offset;
        u8 htype;

        htype = act->mangle.htype;
        mask = act->mangle.mask;
        offset = act->mangle.offset;

        switch (htype) {
        case FLOW_ACT_MANGLE_HDR_TYPE_ETH:
                switch (offset) {
                case PEDIT_ETH_DMAC_31_0:
                case PEDIT_ETH_DMAC_47_32_SMAC_15_0:
                case PEDIT_ETH_SMAC_47_16:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP4:
                switch (offset) {
                case PEDIT_IP4_SRC:
                case PEDIT_IP4_DST:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_IP6:
                switch (offset) {
                case PEDIT_IP6_SRC_31_0:
                case PEDIT_IP6_SRC_63_32:
                case PEDIT_IP6_SRC_95_64:
                case PEDIT_IP6_SRC_127_96:
                case PEDIT_IP6_DST_31_0:
                case PEDIT_IP6_DST_63_32:
                case PEDIT_IP6_DST_95_64:
                case PEDIT_IP6_DST_127_96:
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_TCP:
                switch (offset) {
                case PEDIT_TCP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for TCP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        case FLOW_ACT_MANGLE_HDR_TYPE_UDP:
                switch (offset) {
                case PEDIT_UDP_SPORT_DPORT:
                        if (!valid_l4_mask(~mask)) {
                                netdev_err(dev, "%s: Unsupported mask for UDP L4 ports\n",
                                           __func__);
                                return false;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported pedit field\n",
                                   __func__);
                        return false;
                }
                break;
        default:
                netdev_err(dev, "%s: Unsupported pedit type\n", __func__);
                return false;
        }
        return true;
}

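/* Validate the action list before any hardware state is touched.
 * Redirects must target a port on this same adapter, vlan push/modify
 * must use 802.1Q, and pedit fields must be offloadable. Since MAC,
 * VLAN and NAT rewrites are only applied to switched traffic, pedit
 * and vlan actions are rejected unless an egress redirect accompanies
 * them.
 */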
static int cxgb4_validate_flow_actions(struct net_device *dev,
                                       struct flow_cls_offload *cls)
{
        struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
        struct flow_action_entry *act;
        bool act_redir = false;
        bool act_pedit = false;
        bool act_vlan = false;
        int i;

        flow_action_for_each(i, act, &rule->action) {
                switch (act->id) {
                case FLOW_ACTION_ACCEPT:
                case FLOW_ACTION_DROP:
                        /* Do nothing */
                        break;
                case FLOW_ACTION_REDIRECT: {
                        struct adapter *adap = netdev2adap(dev);
                        struct net_device *n_dev, *target_dev;
                        unsigned int i;
                        bool found = false;

                        target_dev = act->dev;
                        for_each_port(adap, i) {
                                n_dev = adap->port[i];
                                if (target_dev == n_dev) {
                                        found = true;
                                        break;
                                }
                        }

                        /* If interface doesn't belong to our hw, then
                         * the provided output port is not valid
                         */
                        if (!found) {
                                netdev_err(dev, "%s: Out port invalid\n",
                                           __func__);
                                return -EINVAL;
                        }
                        act_redir = true;
                        }
                        break;
                case FLOW_ACTION_VLAN_POP:
                case FLOW_ACTION_VLAN_PUSH:
                case FLOW_ACTION_VLAN_MANGLE: {
                        u16 proto = be16_to_cpu(act->vlan.proto);

                        switch (act->id) {
                        case FLOW_ACTION_VLAN_POP:
                                break;
                        case FLOW_ACTION_VLAN_PUSH:
                        case FLOW_ACTION_VLAN_MANGLE:
                                if (proto != ETH_P_8021Q) {
                                        netdev_err(dev, "%s: Unsupported vlan proto\n",
                                                   __func__);
                                        return -EOPNOTSUPP;
                                }
                                break;
                        default:
                                netdev_err(dev, "%s: Unsupported vlan action\n",
                                           __func__);
                                return -EOPNOTSUPP;
                        }
                        act_vlan = true;
                        }
                        break;
                case FLOW_ACTION_MANGLE: {
                        bool pedit_valid = valid_pedit_action(dev, act);

                        if (!pedit_valid)
                                return -EOPNOTSUPP;
                        act_pedit = true;
                        }
                        break;
                default:
                        netdev_err(dev, "%s: Unsupported action\n", __func__);
                        return -EOPNOTSUPP;
                }
        }

        if ((act_pedit || act_vlan) && !act_redir) {
                netdev_err(dev, "%s: pedit/vlan rewrite invalid without egress redirect\n",
                           __func__);
                return -EINVAL;
        }

        return 0;
}

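/* Offload a new flower rule: validate actions and match keys,
 * allocate a driver-side entry, build the filter specification,
 * program the filter into hardware (waiting for the firmware reply),
 * then index the entry by the TC cookie in the flower rhashtable so
 * the destroy/stats callbacks can find it. Illustrative usage from
 * userspace (device names are placeholders):
 *
 *   tc filter add dev ethX ingress protocol ip flower \
 *           dst_ip 198.51.100.1 action mirred egress redirect dev ethY
 */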
int cxgb4_tc_flower_replace(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        struct ch_filter_specification *fs;
        struct filter_ctx ctx;
        int fidx;
        int ret;

        if (cxgb4_validate_flow_actions(dev, cls))
                return -EOPNOTSUPP;

        if (cxgb4_validate_flow_match(dev, cls))
                return -EOPNOTSUPP;

        ch_flower = allocate_flower_entry();
        if (!ch_flower) {
                netdev_err(dev, "%s: ch_flower alloc failed.\n", __func__);
                return -ENOMEM;
        }

        fs = &ch_flower->fs;
        fs->hitcnts = 1;
        cxgb4_process_flow_match(dev, cls, fs);
        cxgb4_process_flow_actions(dev, cls, fs);

        fs->hash = is_filter_exact_match(adap, fs);
        if (fs->hash) {
                fidx = 0;
        } else {
                fidx = cxgb4_get_free_ftid(dev, fs->type ? PF_INET6 : PF_INET);
                if (fidx < 0) {
                        netdev_err(dev, "%s: No fidx for offload.\n", __func__);
                        ret = -ENOMEM;
                        goto free_entry;
                }
        }

        init_completion(&ctx.completion);
        ret = __cxgb4_set_filter(dev, fidx, fs, &ctx);
        if (ret) {
                netdev_err(dev, "%s: filter creation err %d\n",
                           __func__, ret);
                goto free_entry;
        }

        /* Wait for reply */
        ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
        if (!ret) {
                ret = -ETIMEDOUT;
                goto free_entry;
        }

        ret = ctx.result;
        /* Check if hw returned error for filter creation */
        if (ret)
                goto free_entry;

        ch_flower->tc_flower_cookie = cls->cookie;
        ch_flower->filter_id = ctx.tid;
        ret = rhashtable_insert_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret)
                goto del_filter;

        return 0;

del_filter:
        cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);

free_entry:
        kfree(ch_flower);
        return ret;
}

int cxgb4_tc_flower_destroy(struct net_device *dev,
                            struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_entry *ch_flower;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower)
                return -ENOENT;

        ret = cxgb4_del_filter(dev, ch_flower->filter_id, &ch_flower->fs);
        if (ret)
                goto err;

        ret = rhashtable_remove_fast(&adap->flower_tbl, &ch_flower->node,
                                     adap->flower_ht_params);
        if (ret) {
                netdev_err(dev, "Flow remove from rhashtable failed\n");
                goto err;
        }
        kfree_rcu(ch_flower, rcu);

err:
        return ret;
}

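/* Periodic stats poll, run from a workqueue every STATS_CHECK_PERIOD.
 * Walk all offloaded flows and refresh last_used whenever the hardware
 * packet counter has advanced, so TC can age rules correctly. The
 * rhashtable walk may return -EAGAIN while a resize is in progress,
 * in which case the walk is simply restarted.
 */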
static void ch_flower_stats_handler(struct work_struct *work)
{
        struct adapter *adap = container_of(work, struct adapter,
                                            flower_stats_work);
        struct ch_tc_flower_entry *flower_entry;
        struct ch_tc_flower_stats *ofld_stats;
        struct rhashtable_iter iter;
        u64 packets;
        u64 bytes;
        int ret;

        rhashtable_walk_enter(&adap->flower_tbl, &iter);
        do {
                rhashtable_walk_start(&iter);

                while ((flower_entry = rhashtable_walk_next(&iter)) &&
                       !IS_ERR(flower_entry)) {
                        ret = cxgb4_get_filter_counters(adap->port[0],
                                                        flower_entry->filter_id,
                                                        &packets, &bytes,
                                                        flower_entry->fs.hash);
                        if (!ret) {
                                spin_lock(&flower_entry->lock);
                                ofld_stats = &flower_entry->stats;

                                if (ofld_stats->prev_packet_count != packets) {
                                        ofld_stats->prev_packet_count = packets;
                                        ofld_stats->last_used = jiffies;
                                }
                                spin_unlock(&flower_entry->lock);
                        }
                }

                rhashtable_walk_stop(&iter);

        } while (flower_entry == ERR_PTR(-EAGAIN));
        rhashtable_walk_exit(&iter);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
}

static void ch_flower_stats_cb(struct timer_list *t)
{
        struct adapter *adap = from_timer(adap, t, flower_stats_timer);

        schedule_work(&adap->flower_stats_work);
}

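/* TC stats callback. Hardware counters are cumulative, while
 * flow_stats_update() expects deltas, so report the difference from
 * the counts seen at the previous call and then record the new
 * absolute values.
 */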
int cxgb4_tc_flower_stats(struct net_device *dev,
                          struct flow_cls_offload *cls)
{
        struct adapter *adap = netdev2adap(dev);
        struct ch_tc_flower_stats *ofld_stats;
        struct ch_tc_flower_entry *ch_flower;
        u64 packets;
        u64 bytes;
        int ret;

        ch_flower = ch_flower_lookup(adap, cls->cookie);
        if (!ch_flower) {
                ret = -ENOENT;
                goto err;
        }

        ret = cxgb4_get_filter_counters(dev, ch_flower->filter_id,
                                        &packets, &bytes,
                                        ch_flower->fs.hash);
        if (ret < 0)
                goto err;

        spin_lock_bh(&ch_flower->lock);
        ofld_stats = &ch_flower->stats;
        if (ofld_stats->packet_count != packets) {
                if (ofld_stats->prev_packet_count != packets)
                        ofld_stats->last_used = jiffies;
                flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
                                  packets - ofld_stats->packet_count,
                                  ofld_stats->last_used);

                ofld_stats->packet_count = packets;
                ofld_stats->byte_count = bytes;
                ofld_stats->prev_packet_count = packets;
        }
        spin_unlock_bh(&ch_flower->lock);
        return 0;

err:
        return ret;
}

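/* Offloaded flows are tracked in an rhashtable keyed by the TC rule
 * cookie, which is what flower hands back on the destroy/stats calls.
 */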
static const struct rhashtable_params cxgb4_tc_flower_ht_params = {
        .nelem_hint = 384,
        .head_offset = offsetof(struct ch_tc_flower_entry, node),
        .key_offset = offsetof(struct ch_tc_flower_entry, tc_flower_cookie),
        .key_len = sizeof(((struct ch_tc_flower_entry *)0)->tc_flower_cookie),
        .max_size = 524288,
        .min_size = 512,
        .automatic_shrinking = true
};

int cxgb4_init_tc_flower(struct adapter *adap)
{
        int ret;

        if (adap->tc_flower_initialized)
                return -EEXIST;

        adap->flower_ht_params = cxgb4_tc_flower_ht_params;
        ret = rhashtable_init(&adap->flower_tbl, &adap->flower_ht_params);
        if (ret)
                return ret;

        INIT_WORK(&adap->flower_stats_work, ch_flower_stats_handler);
        timer_setup(&adap->flower_stats_timer, ch_flower_stats_cb, 0);
        mod_timer(&adap->flower_stats_timer, jiffies + STATS_CHECK_PERIOD);
        adap->tc_flower_initialized = true;
        return 0;
}

void cxgb4_cleanup_tc_flower(struct adapter *adap)
{
        if (!adap->tc_flower_initialized)
                return;

        if (adap->flower_stats_timer.function)
                del_timer_sync(&adap->flower_stats_timer);
        cancel_work_sync(&adap->flower_stats_work);
        rhashtable_destroy(&adap->flower_tbl);
        adap->tc_flower_initialized = false;
}