/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(2),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap;   /* flows sharing the same encap ID */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}
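
/* Note on the scheme above (summary of the code, not an added guarantee):
 * the key hashes the raw packed action array, so two flows whose pedit
 * rewrites compile to byte-identical HW action lists collapse into one
 * firmware modify-header ID and share a single mlx5e_mod_hdr_entry.
 */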
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}
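
/* Usage note: the key bytes are copied into the entry above, so callers may
 * free parse_attr->mod_hdr_actions once this returns; both the NIC and FDB
 * paths below kfree() that buffer right after calling this helper.
 */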
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}

static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}
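
/* Sizing sketch with illustrative numbers only: a device reporting 64K flow
 * counters and log_max_ft_size = 16 gives
 *   tc_grp_size = min(64K, MLX5E_TC_TABLE_MAX_GROUP_SIZE) = 64K
 *   tc_tbl_size = min(64K * MLX5E_TC_TABLE_NUM_GROUPS, 1 << 16) = 64K
 * i.e. the table is capped by the device's max flow-table size, split across
 * 4 autogrouped groups.
 */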
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow);
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     attr->parse_attr->mirred_ifindex);
		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
					 out_dev, &encap_dev, flow);
		if (err) {
			rule = ERR_PTR(err);
			if (err != -EAGAIN)
				goto err_attach_encap;
		}
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->out_rep = rpriv->rep;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	/* we get here if (1) there's no error (rule being null) or when
	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
	 */
	if (rule != ERR_PTR(-EAGAIN)) {
		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
		if (IS_ERR(rule))
			goto err_add_rule;
	}
	return rule;

err_add_rule:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
err_attach_encap:
	return rule;
}
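
/* The -EAGAIN contract above: when the tunnel neighbour isn't resolved yet,
 * mlx5e_attach_encap() returns -EAGAIN, no FDB rule is installed, and the
 * flow stays cached on the encap entry; mlx5e_tc_encap_flows_add() installs
 * it later, once a neigh update marks the encap header valid.
 */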
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		esw_attr = flow->esw_attr;
		esw_attr->encap_id = e->encap_id;
		flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
		if (IS_ERR(flow->rule)) {
			err = PTR_ERR(flow->rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = ipv6_stub->nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n) {
			WARN(1, "The neighbour already freed\n");
			return;
		}

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}
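
/* Rationale for the walk above: HW flow counters stand in for neighbour
 * liveness. If any offloaded tunnel flow saw traffic since the last report,
 * the neigh entry is poked via neigh_event_send() so the stack refreshes it
 * rather than letting it expire while HW keeps using the cached MAC.
 */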
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		struct net_device *up_dev = uplink_rpriv->netdev;
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
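
/* Illustrative (not exhaustive) flower filter that exercises this decap
 * match path; device names and addresses below are examples only:
 *
 *   tc filter add dev vxlan_sys_4789 protocol ip ingress flower \
 *       enc_key_id 100 enc_dst_ip 192.0.2.1 enc_dst_port 4789 \
 *       action tunnel_key unset action mirred egress redirect dev vf_rep0
 */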
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl))
			return -EOPNOTSUPP;

		if (mask->tos || mask->ttl)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}

struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
	{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
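
/* Example expansion, for orientation: OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5)
 * below yields {MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1,
 * offsetof(struct pedit_headers, tcp.ack_seq) + 5}, i.e. the single byte at
 * offset 13 of the TCP header, which is where the flags live.
 */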
static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed
 * at most from the SW pedit action. On success, it says how many HW actions
 * were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last  = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;

	return 0;
}
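
/* The first/next_z/last check above rejects non-contiguous masks. Worked
 * example: a pedit mask of 0b1011 gives first = 0, next_z = 2, last = 3,
 * so the rewrite spans a hole and can't be expressed as one offset+length
 * HW action; such keys fall back to software.
 */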
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can expand to as many as 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}
static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* the HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts)
{
	const struct tc_action *a;
	bool modify_ip_header;
	LIST_HEAD(actions);
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, i;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!is_tcf_pedit(a))
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (i = 0; i < nkeys; i++) {
			htype = tcf_pedit_htype(a, i);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
				modify_ip_header = true;
				break;
			}
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP && ip_proto != IPPROTO_UDP) {
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}

static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts);

	return true;
}
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return 0;
}
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}
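
/* Encap entries are deduplicated on the whole ip_tunnel_key (addresses, VNI,
 * ports, tos/ttl), so flows targeting the same remote tunnel endpoint share
 * one encap_id and one neighbour. hash_encap_info() only picks the bucket;
 * cmp_encap_info() is the authoritative match.
 */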
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
					 fl6);
	if (ret < 0)
		return ret;

	*out_ttl = ip6_dst_hoplimit(dst);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up the ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. That way, if we get a notification when
	 * the neigh changes its validity state, we find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}
	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. That way, if we get a notification when
	 * the neigh changes its validity state, we find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
									   REP_ETH);
	struct net_device *up_dev = uplink_rpriv->netdev;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, attr->action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *out_dev;
			struct mlx5e_priv *out_priv;

			out_dev = tcf_mirred_dev(a);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				parse_attr->mirred_ifindex = out_dev->ifindex;
				parse_attr->tun_info = *info;
				attr->parse_attr = parse_attr;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
						MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
						MLX5_FLOW_CONTEXT_ACTION_COUNT;
				/* attr->out_rep is resolved when we handle encap */
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}

	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return err;
}
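
/* Illustrative encap rule taken through this path; device names and
 * addresses are examples only. Matched VF traffic is VXLAN-encapsulated
 * and forwarded once the route/neighbour resolve:
 *
 *   tc filter add dev vf_rep0 protocol ip ingress flower \
 *       action tunnel_key set src_ip 198.51.100.1 dst_ip 198.51.100.2 \
 *           id 100 dst_port 4789 \
 *       action mirred egress redirect dev vxlan_sys_4789
 */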
int mlx5e_configure_flower(struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size  = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size  = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		if (err != -EAGAIN)
			goto err_free;
	}

	if (err != -EAGAIN)
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH &&
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);
	return err;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	return 0;
}
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
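
/* The TC filter cookie is the rhashtable key, so the configure/delete/stats
 * entry points above resolve a filter to its mlx5e_tc_flow in O(1) without
 * scanning the flow table.
 */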
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	hash_init(tc->mod_hdr_tbl);

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}