/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap;   /* flows sharing the same encap ID */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
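
/* Modify-header contexts are device objects, so flows that apply an
 * identical list of packet rewrites share a single context: look the
 * action list up in the per-namespace hash table (FDB for e-switch
 * flows, NIC otherwise) and only allocate a new context on a miss.
 */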
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}
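
/* Offload a flow into the NIC RX steering tables. The TC flow table is
 * created lazily on the first offloaded flow and torn down again once
 * the last filter is removed (see mlx5e_tc_del_nic_flow()).
 */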
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);
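
/* Offload a flow into the e-switch FDB: resolve the vlan push/pop
 * actions first, then allocate an optional modify-header context, and
 * finally install the rule itself. Error paths unwind in reverse order.
 */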
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}
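
/* Called when a tunnel neighbour becomes valid: offload the cached
 * encapsulation header and re-install every flow that shares it.
 */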
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		flow->esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5e_tc_add_fdb_flow(priv,
						   flow->esw_attr->parse_attr,
						   flow);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			counter = mlx5_flow_rule_counter(flow->rule);
			mlx5_del_flow_rules(flow->rule);
			mlx5_fc_destroy(priv->mdev, counter);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}
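
/* Feed neighbour activity observed on offloaded encapsulated flows back
 * to the neighbour subsystem, so used tunnel neighbours don't expire:
 * if any flow counter advanced since the last report, poke the
 * corresponding neigh entry.
 */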
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}
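
/* Translate the flower tunnel-key match into device match fields:
 * outer UDP protocol plus, when given, the VXLAN VNI.
 */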
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
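
/* Translate a flower match into an mlx5 flow spec. On success,
 * *min_inline holds the minimal headers inline mode the match requires
 * from the send side (L2, IP or TCP/UDP).
 */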
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer headers were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		if (mask->tos)
			*min_inline = MLX5_INLINE_MODE_IP;

		if (mask->ttl) /* currently not supported */
			return -EOPNOTSUPP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}
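
/* Map between the packet fields the device can rewrite with
 * modify-header actions and their size and offset inside
 * struct pedit_headers.
 */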
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};

/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed
 * at max from the SW pedit action. On success, it says how many HW actions
 * were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, first_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		first_z = find_first_zero_bit(&mask, field_bsize);
		first = find_first_bit(&mask, field_bsize);
		last  = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1) || first_z < last) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;

	return 0;
}

static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can yield up to 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
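
/* Build the static ETH/IP/UDP/VXLAN encapsulation header in buf.
 * Length and checksum fields are left clear; the device completes
 * them per packet.
 */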
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up the ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}
	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we'll get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
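
/* Attach the flow to the encapsulation entry matching its tunnel key,
 * creating the entry (and resolving its neighbour) on first use.
 * Returns -EAGAIN when the neighbour is not yet valid; the flow is then
 * cached and offloaded later by mlx5e_tc_encap_flows_add().
 */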
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}
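
/* Parse the TC actions of an e-switch flow: drop, header rewrite,
 * checksum, mirred redirect (to a port on the same e-switch or via an
 * encap tunnel), vlan push/pop and tunnel set/release.
 */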
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev, *encap_dev = NULL;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, &encap_dev, flow);
				if (err && err != -EAGAIN)
					return err;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(encap_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
				attr->parse_attr = parse_attr;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return err;
}
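
/* Entry point for offloading a flower classifier: parse the match and
 * actions, install the rule into the FDB or the NIC TC table, and track
 * the flow in the driver hash table keyed by the filter cookie.
 */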
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size  = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size  = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_handle_encap_flow;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);
	return err;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_handle_encap_flow:
	if (err == -EAGAIN) {
		err = rhashtable_insert_fast(&tc->ht, &flow->node,
					     tc->ht_params);
		if (err)
			mlx5e_tc_del_flow(priv, flow);
		else
			return 0;
	}

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	hash_init(tc->mod_hdr_tbl);

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}