// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (C) 2014-2017 aQuantia Corporation. */

/* File aq_filters.c: RX filters related functions. */

#include "aq_filters.h"
8 static bool __must_check
9 aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp)
11 if (fsp->flow_type & FLOW_MAC_EXT)
14 switch (fsp->flow_type & ~FLOW_EXT) {
26 switch (fsp->h_u.usr_ip4_spec.proto) {
36 switch (fsp->h_u.usr_ip6_spec.l4_proto) {
52 static bool __must_check
53 aq_match_filter(struct ethtool_rx_flow_spec *fsp1,
54 struct ethtool_rx_flow_spec *fsp2)
56 if (fsp1->flow_type != fsp2->flow_type ||
57 memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) ||
58 memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) ||
59 memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) ||
60 memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext)))
66 static bool __must_check
67 aq_rule_already_exists(struct aq_nic_s *aq_nic,
68 struct ethtool_rx_flow_spec *fsp)
70 struct aq_rx_filter *rule;
71 struct hlist_node *aq_node2;
72 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
74 hlist_for_each_entry_safe(rule, aq_node2,
75 &rx_fltrs->filter_list, aq_node) {
76 if (rule->aq_fsp.location == fsp->location)
78 if (aq_match_filter(&rule->aq_fsp, fsp)) {
79 netdev_err(aq_nic->ndev,
80 "ethtool: This filter is already set\n");
88 static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic,
89 struct aq_hw_rx_fltrs_s *rx_fltrs,
90 struct ethtool_rx_flow_spec *fsp)
92 if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 ||
93 fsp->location > AQ_RX_LAST_LOC_FL3L4) {
94 netdev_err(aq_nic->ndev,
95 "ethtool: location must be in range [%d, %d]",
96 AQ_RX_FIRST_LOC_FL3L4,
97 AQ_RX_LAST_LOC_FL3L4);
100 if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) {
101 rx_fltrs->fl3l4.is_ipv6 = false;
102 netdev_err(aq_nic->ndev,
103 "ethtool: mixing ipv4 and ipv6 is not allowed");
105 } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) {
106 rx_fltrs->fl3l4.is_ipv6 = true;
107 netdev_err(aq_nic->ndev,
108 "ethtool: mixing ipv4 and ipv6 is not allowed");
110 } else if (rx_fltrs->fl3l4.is_ipv6 &&
111 fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 &&
112 fsp->location != AQ_RX_FIRST_LOC_FL3L4) {
113 netdev_err(aq_nic->ndev,
114 "ethtool: The specified location for ipv6 must be %d or %d",
115 AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4);
122 static int __must_check
123 aq_check_approve_fl2(struct aq_nic_s *aq_nic,
124 struct aq_hw_rx_fltrs_s *rx_fltrs,
125 struct ethtool_rx_flow_spec *fsp)
127 if (fsp->location < AQ_RX_FIRST_LOC_FETHERT ||
128 fsp->location > AQ_RX_LAST_LOC_FETHERT) {
129 netdev_err(aq_nic->ndev,
130 "ethtool: location must be in range [%d, %d]",
131 AQ_RX_FIRST_LOC_FETHERT,
132 AQ_RX_LAST_LOC_FETHERT);
136 if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK &&
137 fsp->m_u.ether_spec.h_proto == 0U) {
138 netdev_err(aq_nic->ndev,
139 "ethtool: proto (ether_type) parameter must be specified");
146 static int __must_check
147 aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
148 struct aq_hw_rx_fltrs_s *rx_fltrs,
149 struct ethtool_rx_flow_spec *fsp)
151 if (fsp->location < AQ_RX_FIRST_LOC_FVLANID ||
152 fsp->location > AQ_RX_LAST_LOC_FVLANID) {
153 netdev_err(aq_nic->ndev,
154 "ethtool: location must be in range [%d, %d]",
155 AQ_RX_FIRST_LOC_FVLANID,
156 AQ_RX_LAST_LOC_FVLANID);
160 if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
161 (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
162 aq_nic->active_vlans))) {
163 netdev_err(aq_nic->ndev,
164 "ethtool: unknown vlan-id specified");
168 if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) {
169 netdev_err(aq_nic->ndev,
170 "ethtool: queue number must be in range [0, %d]",
171 aq_nic->aq_nic_cfg.num_rss_queues - 1);
177 static int __must_check
178 aq_check_filter(struct aq_nic_s *aq_nic,
179 struct ethtool_rx_flow_spec *fsp)
182 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
184 if (fsp->flow_type & FLOW_EXT) {
185 if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) {
186 err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp);
187 } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) {
188 err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
190 netdev_err(aq_nic->ndev,
191 "ethtool: invalid vlan mask 0x%x specified",
192 be16_to_cpu(fsp->m_ext.vlan_tci));
196 switch (fsp->flow_type & ~FLOW_EXT) {
198 err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp);
205 rx_fltrs->fl3l4.is_ipv6 = false;
206 err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
213 rx_fltrs->fl3l4.is_ipv6 = true;
214 err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp);
217 netdev_err(aq_nic->ndev,
218 "ethtool: unknown flow-type specified");
226 static bool __must_check
227 aq_rule_is_not_support(struct aq_nic_s *aq_nic,
228 struct ethtool_rx_flow_spec *fsp)
230 bool rule_is_not_support = false;
232 if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) {
233 netdev_err(aq_nic->ndev,
234 "ethtool: Please, to enable the RX flow control:\n"
235 "ethtool -K %s ntuple on\n", aq_nic->ndev->name);
236 rule_is_not_support = true;
237 } else if (!aq_rule_is_approve(fsp)) {
238 netdev_err(aq_nic->ndev,
239 "ethtool: The specified flow type is not supported\n");
240 rule_is_not_support = true;
241 } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW &&
242 (fsp->h_u.tcp_ip4_spec.tos ||
243 fsp->h_u.tcp_ip6_spec.tclass)) {
244 netdev_err(aq_nic->ndev,
245 "ethtool: The specified tos tclass are not supported\n");
246 rule_is_not_support = true;
247 } else if (fsp->flow_type & FLOW_MAC_EXT) {
248 netdev_err(aq_nic->ndev,
249 "ethtool: MAC_EXT is not supported");
250 rule_is_not_support = true;
253 return rule_is_not_support;
256 static bool __must_check
257 aq_rule_is_not_correct(struct aq_nic_s *aq_nic,
258 struct ethtool_rx_flow_spec *fsp)
260 bool rule_is_not_correct = false;
263 rule_is_not_correct = true;
264 } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) {
265 netdev_err(aq_nic->ndev,
266 "ethtool: The specified number %u rule is invalid\n",
268 rule_is_not_correct = true;
269 } else if (aq_check_filter(aq_nic, fsp)) {
270 rule_is_not_correct = true;
271 } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
272 if (fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) {
273 netdev_err(aq_nic->ndev,
274 "ethtool: The specified action is invalid.\n"
275 "Maximum allowable value action is %u.\n",
276 aq_nic->aq_nic_cfg.num_rss_queues - 1);
277 rule_is_not_correct = true;
281 return rule_is_not_correct;
284 static int __must_check
285 aq_check_rule(struct aq_nic_s *aq_nic,
286 struct ethtool_rx_flow_spec *fsp)
290 if (aq_rule_is_not_correct(aq_nic, fsp))
292 else if (aq_rule_is_not_support(aq_nic, fsp))
294 else if (aq_rule_already_exists(aq_nic, fsp))
300 static void aq_set_data_fl2(struct aq_nic_s *aq_nic,
301 struct aq_rx_filter *aq_rx_fltr,
302 struct aq_rx_filter_l2 *data, bool add)
304 const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
306 memset(data, 0, sizeof(*data));
308 data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT;
310 if (fsp->ring_cookie != RX_CLS_FLOW_DISC)
311 data->queue = fsp->ring_cookie;
315 data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto);
316 data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci)
318 data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci)
319 & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
322 static int aq_add_del_fether(struct aq_nic_s *aq_nic,
323 struct aq_rx_filter *aq_rx_fltr, bool add)
325 struct aq_rx_filter_l2 data;
326 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
327 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
329 aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add);
331 if (unlikely(!aq_hw_ops->hw_filter_l2_set))
333 if (unlikely(!aq_hw_ops->hw_filter_l2_clear))
337 return aq_hw_ops->hw_filter_l2_set(aq_hw, &data);
339 return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data);
342 static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan)
346 for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
347 if (aq_vlans[i].enable &&
348 aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED &&
349 aq_vlans[i].vlan_id == vlan) {
357 /* Function rebuilds array of vlan filters so that filters with assigned
358 * queue have a precedence over just vlans on the interface.
360 static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic,
361 unsigned long *active_vlans,
362 struct aq_rx_filter_vlan *aq_vlans)
364 bool vlan_busy = false;
368 for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
369 if (aq_vlans[i].enable &&
370 aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED)
373 vlan = find_next_bit(active_vlans,
376 if (vlan == VLAN_N_VID) {
377 aq_vlans[i].enable = 0U;
378 aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
379 aq_vlans[i].vlan_id = 0;
383 vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan);
385 aq_vlans[i].enable = 1U;
386 aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED;
387 aq_vlans[i].vlan_id = vlan;
389 } while (vlan_busy && vlan != VLAN_N_VID);
393 static int aq_set_data_fvlan(struct aq_nic_s *aq_nic,
394 struct aq_rx_filter *aq_rx_fltr,
395 struct aq_rx_filter_vlan *aq_vlans, bool add)
397 const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
398 int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID;
401 memset(&aq_vlans[location], 0, sizeof(aq_vlans[location]));
406 /* remove vlan if it was in table without queue assignment */
407 for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) {
408 if (aq_vlans[i].vlan_id ==
409 (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) {
410 aq_vlans[i].enable = false;
414 aq_vlans[location].location = location;
415 aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci)
417 aq_vlans[location].queue = fsp->ring_cookie & 0x1FU;
418 aq_vlans[location].enable = 1U;
423 int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id)
425 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
426 struct aq_rx_filter *rule = NULL;
427 struct hlist_node *aq_node2;
429 hlist_for_each_entry_safe(rule, aq_node2,
430 &rx_fltrs->filter_list, aq_node) {
431 if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id)
434 if (rule && rule->type == aq_rx_filter_vlan &&
435 be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) {
436 struct ethtool_rxnfc cmd;
438 cmd.fs.location = rule->aq_fsp.location;
439 return aq_del_rxnfc_rule(aq_nic, &cmd);
445 static int aq_add_del_fvlan(struct aq_nic_s *aq_nic,
446 struct aq_rx_filter *aq_rx_fltr, bool add)
448 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
450 if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
453 aq_set_data_fvlan(aq_nic,
455 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans,
458 return aq_filters_vlans_update(aq_nic);
461 static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic,
462 struct aq_rx_filter *aq_rx_fltr,
463 struct aq_rx_filter_l3l4 *data, bool add)
465 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
466 const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp;
468 memset(data, 0, sizeof(*data));
470 data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6;
471 data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location);
475 rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location);
477 rx_fltrs->fl3l4.active_ipv6 &=
478 ~BIT((data->location) / 4);
483 data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4;
485 switch (fsp->flow_type) {
488 data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
492 data->cmd |= HW_ATL_RX_UDP;
493 data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
497 data->cmd |= HW_ATL_RX_SCTP;
498 data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4;
504 if (!data->is_ipv6) {
506 ntohl(fsp->h_u.tcp_ip4_spec.ip4src);
508 ntohl(fsp->h_u.tcp_ip4_spec.ip4dst);
509 rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location);
513 rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4);
514 for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) {
516 ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]);
518 ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]);
520 data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6;
522 if (fsp->flow_type != IP_USER_FLOW &&
523 fsp->flow_type != IPV6_USER_FLOW) {
524 if (!data->is_ipv6) {
526 ntohs(fsp->h_u.tcp_ip4_spec.pdst);
528 ntohs(fsp->h_u.tcp_ip4_spec.psrc);
531 ntohs(fsp->h_u.tcp_ip6_spec.pdst);
533 ntohs(fsp->h_u.tcp_ip6_spec.psrc);
536 if (data->ip_src[0] && !data->is_ipv6)
537 data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3;
538 if (data->ip_dst[0] && !data->is_ipv6)
539 data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3;
541 data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4;
543 data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4;
544 if (fsp->ring_cookie != RX_CLS_FLOW_DISC) {
545 data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT;
546 data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT;
547 data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4;
549 data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT;
555 static int aq_set_fl3l4(struct aq_hw_s *aq_hw,
556 const struct aq_hw_ops *aq_hw_ops,
557 struct aq_rx_filter_l3l4 *data)
559 if (unlikely(!aq_hw_ops->hw_filter_l3l4_set))
562 return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data);
565 static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic,
566 struct aq_rx_filter *aq_rx_fltr, bool add)
568 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
569 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
570 struct aq_rx_filter_l3l4 data;
572 if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 ||
573 aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 ||
574 aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add)))
577 return aq_set_fl3l4(aq_hw, aq_hw_ops, &data);
580 static int aq_add_del_rule(struct aq_nic_s *aq_nic,
581 struct aq_rx_filter *aq_rx_fltr, bool add)
585 if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) {
586 if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
588 aq_rx_fltr->type = aq_rx_filter_vlan;
589 err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add);
590 } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci)
592 aq_rx_fltr->type = aq_rx_filter_ethertype;
593 err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
596 switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) {
598 aq_rx_fltr->type = aq_rx_filter_ethertype;
599 err = aq_add_del_fether(aq_nic, aq_rx_fltr, add);
609 aq_rx_fltr->type = aq_rx_filter_l3l4;
610 err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add);
621 static int aq_update_table_filters(struct aq_nic_s *aq_nic,
622 struct aq_rx_filter *aq_rx_fltr, u16 index,
623 struct ethtool_rxnfc *cmd)
625 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
626 struct aq_rx_filter *rule = NULL, *parent = NULL;
627 struct hlist_node *aq_node2;
630 hlist_for_each_entry_safe(rule, aq_node2,
631 &rx_fltrs->filter_list, aq_node) {
632 if (rule->aq_fsp.location >= index)
637 if (rule && rule->aq_fsp.location == index) {
638 err = aq_add_del_rule(aq_nic, rule, false);
639 hlist_del(&rule->aq_node);
641 --rx_fltrs->active_filters;
644 if (unlikely(!aq_rx_fltr))
647 INIT_HLIST_NODE(&aq_rx_fltr->aq_node);
650 hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node);
652 hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list);
654 ++rx_fltrs->active_filters;
659 u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic)
661 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
663 return rx_fltrs->active_filters;
666 struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic)
668 return &aq_nic->aq_hw_rx_fltrs;
671 int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
673 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
674 struct ethtool_rx_flow_spec *fsp =
675 (struct ethtool_rx_flow_spec *)&cmd->fs;
676 struct aq_rx_filter *aq_rx_fltr;
679 err = aq_check_rule(aq_nic, fsp);
683 aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL);
684 if (unlikely(!aq_rx_fltr)) {
689 memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp));
691 err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL);
695 err = aq_add_del_rule(aq_nic, aq_rx_fltr, true);
697 hlist_del(&aq_rx_fltr->aq_node);
698 --rx_fltrs->active_filters;
710 int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd)
712 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
713 struct aq_rx_filter *rule = NULL;
714 struct hlist_node *aq_node2;
717 hlist_for_each_entry_safe(rule, aq_node2,
718 &rx_fltrs->filter_list, aq_node) {
719 if (rule->aq_fsp.location == cmd->fs.location)
723 if (rule && rule->aq_fsp.location == cmd->fs.location) {
724 err = aq_add_del_rule(aq_nic, rule, false);
725 hlist_del(&rule->aq_node);
727 --rx_fltrs->active_filters;
732 int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd)
734 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
735 struct ethtool_rx_flow_spec *fsp =
736 (struct ethtool_rx_flow_spec *)&cmd->fs;
737 struct aq_rx_filter *rule = NULL;
738 struct hlist_node *aq_node2;
740 hlist_for_each_entry_safe(rule, aq_node2,
741 &rx_fltrs->filter_list, aq_node)
742 if (fsp->location <= rule->aq_fsp.location)
745 if (unlikely(!rule || fsp->location != rule->aq_fsp.location))
748 memcpy(fsp, &rule->aq_fsp, sizeof(*fsp));
753 int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd,
756 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
757 struct hlist_node *aq_node2;
758 struct aq_rx_filter *rule;
761 cmd->data = aq_get_rxnfc_count_all_rules(aq_nic);
763 hlist_for_each_entry_safe(rule, aq_node2,
764 &rx_fltrs->filter_list, aq_node) {
765 if (unlikely(count == cmd->rule_cnt))
768 rule_locs[count++] = rule->aq_fsp.location;
771 cmd->rule_cnt = count;
776 int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic)
778 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
779 struct hlist_node *aq_node2;
780 struct aq_rx_filter *rule;
783 hlist_for_each_entry_safe(rule, aq_node2,
784 &rx_fltrs->filter_list, aq_node) {
785 err = aq_add_del_rule(aq_nic, rule, false);
788 hlist_del(&rule->aq_node);
790 --rx_fltrs->active_filters;
797 int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic)
799 struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic);
800 struct hlist_node *aq_node2;
801 struct aq_rx_filter *rule;
804 hlist_for_each_entry_safe(rule, aq_node2,
805 &rx_fltrs->filter_list, aq_node) {
806 err = aq_add_del_rule(aq_nic, rule, true);
815 int aq_filters_vlans_update(struct aq_nic_s *aq_nic)
817 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
818 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
823 if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
825 if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
828 aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
829 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
831 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
832 for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++)
833 hweight += hweight_long(aq_nic->active_vlans[i]);
835 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
840 err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
841 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans
846 if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) {
847 if (hweight <= AQ_VLAN_MAX_FILTERS && hweight > 0) {
848 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw,
849 !(aq_nic->packet_filter & IFF_PROMISC));
850 aq_nic->aq_nic_cfg.is_vlan_force_promisc = false;
852 /* otherwise left in promiscue mode */
853 aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
860 int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic)
862 const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops;
863 struct aq_hw_s *aq_hw = aq_nic->aq_hw;
866 memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans));
867 aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans,
868 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans);
870 if (unlikely(!aq_hw_ops->hw_filter_vlan_set))
872 if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl))
875 aq_nic->aq_nic_cfg.is_vlan_force_promisc = true;
876 err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false);
879 err = aq_hw_ops->hw_filter_vlan_set(aq_hw,
880 aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans