/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
};

enum {
	MLX5E_TC_FLOW_ESWITCH	= BIT(0),
	MLX5E_TC_FLOW_NIC	= BIT(1),
};

struct mlx5e_tc_flow {
	struct rhash_head	node;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule;
	struct list_head	encap;   /* flows sharing the same encap */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
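/* NIC offload: rules added on the mlx5e netdev itself are programmed into a
 * driver-private TC flow table. As an illustration (device name is
 * hypothetical), a rule this path can offload, dropping and counting all
 * TCP traffic:
 *
 *	tc filter add dev eth0 parent ffff: protocol ip \
 *		flower ip_proto tcp action drop
 */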
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest.ft = priv->fs.vlan.ft.t;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return ERR_CAST(counter);

		dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest.counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(dev, MLX5_FLOW_NAMESPACE_KERNEL,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    MLX5E_TC_TABLE_NUM_ENTRIES,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    0, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, &dest, 1);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);

	return rule;
}
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule);
	mlx5_del_flow_rules(flow->rule);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (flow->nic_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   flow->nic_attr->mod_hdr_id);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);
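/* E-switch (FDB) offload for rules added on representor netdevs. Resources
 * are taken in order - vlan action, modify-header context, offloaded rule -
 * and the error labels below unwind them in reverse; encap state attached by
 * parse_tc_fdb_actions() is released on the outermost error path.
 */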
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_flow_handle *rule;
	int err;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5_modify_header_alloc(priv->mdev, MLX5_FLOW_NAMESPACE_FDB,
					       parse_attr->num_mod_hdr_actions,
					       parse_attr->mod_hdr_actions,
					       &attr->mod_hdr_id);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
	return rule;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);

	mlx5_eswitch_del_vlan_action(esw, flow->esw_attr);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);

	if (flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5_modify_header_dealloc(priv->mdev,
					   attr->mod_hdr_id);
}
static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5_encap_entry *e;

		e = list_entry(next, struct mlx5_encap_entry, flows);
		if (e->n) {
			mlx5_encap_dealloc(priv->mdev, e->encap_id);
			neigh_release(e->n);
		}
		hlist_del_rcu(&e->encap_hlist);
		kfree(e);
	}
}
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}
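/* Build the VXLAN portion of the match: UDP as the outer ip_protocol and,
 * when the filter carries an enc_key_id, the VNI in the misc parameters.
 * For illustration only (device names are hypothetical), a decap rule whose
 * match is parsed here:
 *
 *	tc filter add dev vxlan0 parent ffff: protocol ip \
 *		flower enc_key_id 100 enc_dst_ip 192.168.0.1 enc_dst_port 4789 \
 *		action tunnel_key unset \
 *		action mirred egress redirect dev eth0_0
 */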
static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}
static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}
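/* Translate the flower dissector keys into an mlx5 match spec. On success,
 * *min_inline reports the minimal inline mode the match requires: L2 by
 * default, IP when L3 fields or the fragment bit are matched, and TCP_UDP
 * when L4 ports are matched.
 */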
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *min_inline)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*min_inline = MLX5_INLINE_MODE_L2;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers; outer headers were already set by parse_tunnel_attr().
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (key->flags & FLOW_DIS_IS_FRAGMENT)
				*min_inline = MLX5_INLINE_MODE_IP;
		}
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
		}
	}
	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_IP;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*min_inline = MLX5_INLINE_MODE_IP;
	}
	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;
		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*min_inline = MLX5_INLINE_MODE_TCP_UDP;
	}

	return 0;
}
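/* For e-switch flows on non-uplink vports, reject matches that need deeper
 * headers than the configured inline mode guarantees, since the e-switch can
 * only match on the part of the packet the sender inlined.
 */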
static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 min_inline;
	int err;

	err = __parse_cls_flower(priv, spec, f, &min_inline);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < min_inline)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    min_inline, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	return err;
}
struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};
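/* Map each rewritable packet field to its HW modify-header descriptor: the
 * firmware field id, the field width in bytes, and its offset inside
 * struct pedit_headers. Only whole-field rewrites are offloaded; partial
 * masks are rejected in offload_pedit_fields() below.
 */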
static struct mlx5_fields fields[] = {
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_dest[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_dest[4])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16, 4, offsetof(struct pedit_headers, eth.h_source[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0,  2, offsetof(struct pedit_headers, eth.h_source[4])},
	{MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE,  2, offsetof(struct pedit_headers, eth.h_proto)},

	{MLX5_ACTION_IN_FIELD_OUT_IP_DSCP, 1, offsetof(struct pedit_headers, ip4.tos)},
	{MLX5_ACTION_IN_FIELD_OUT_IP_TTL,  1, offsetof(struct pedit_headers, ip4.ttl)},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV4,   4, offsetof(struct pedit_headers, ip4.saddr)},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV4,   4, offsetof(struct pedit_headers, ip4.daddr)},

	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.saddr.s6_addr32[3])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96, 4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[0])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[1])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32,  4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[2])},
	{MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0,   4, offsetof(struct pedit_headers, ip6.daddr.s6_addr32[3])},

	{MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT, 2, offsetof(struct pedit_headers, tcp.dest)},
	{MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS, 1, offsetof(struct pedit_headers, tcp.ack_seq) + 5},

	{MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT, 2, offsetof(struct pedit_headers, udp.source)},
	{MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT, 2, offsetof(struct pedit_headers, udp.dest)},
};
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed
 * at most from the SW pedit action. On success, it says how many HW actions
 * were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last;
	void *s_masks_p, *a_masks_p, *vals_p;
	u32 s_mask, a_mask, val;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	unsigned long mask;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = a_mask = mask = val = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		memcpy(&val, vals_p, f->size);

		field_bsize = f->size * BITS_PER_BYTE;
		first = find_first_bit(&mask, field_bsize);
		last  = find_last_bit(&mask, field_bsize);
		if (first > 0 || last != (field_bsize - 1)) {
			printk(KERN_WARNING "mlx5: partial rewrite (mask %lx) is currently not offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, 0);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, field_bsize);
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(val));
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(val));
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, val);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}
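/* Illustrative example (device names hypothetical): a single extended pedit
 * key rewriting the IPv4 TTL, e.g.
 *
 *	tc filter add dev eth0_0 parent ffff: protocol ip flower \
 *		action pedit ex munge ip ttl set 64 pipe \
 *		action mirred egress redirect dev eth0_1
 *
 * is compiled above into one MLX5_ACTION_TYPE_SET entry with field
 * MLX5_ACTION_IN_FIELD_OUT_IP_TTL, offset 0 and an 8-bit length.
 */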
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* one 32-bit pedit SW key can expand to up to 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}
static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			printk(KERN_WARNING "mlx5: legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			printk(KERN_WARNING "mlx5: pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			printk(KERN_WARNING "mlx5: attempt to offload an unsupported field (cmd %d)\n",
			       cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}
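/* NIC rules support a single action: drop, a pedit header rewrite, or an
 * skbedit mark whose value is delivered to the CQE as the flow tag, e.g.
 * (illustrative) "... flower ip_proto tcp action skbedit mark 0x1234";
 * the mark must fit in MLX5E_TC_FLOW_ID_MASK (16 bits).
 */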
static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	int err;

	if (tc_no_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
	attr->action = 0;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		/* Only support a single action per rule */
		if (attr->action)
			return -EINVAL;

		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	return 0;
}
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}
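/* Encap entries are shared: flows whose ip_tunnel_key hashes and compares
 * equal reuse a single mlx5_encap_entry and thus a single HW encap_id; the
 * entry is freed once its flow list drains in mlx5e_detach_encap().
 */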
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
	ret = dst->error;
	if (ret) {
		dst_release(dst);
		return ret;
	}

	*out_ttl = ip6_dst_hoplimit(dst);

	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = mlx5_eswitch_get_uplink_netdev(esw);
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
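/* The encap headers below are prebuilt in SW and handed to the HW as raw
 * bytes, laid out as Ethernet | IPv4 or IPv6 | UDP | VXLAN (VXLAN_HLEN
 * covers the UDP plus VXLAN headers, so the IPv4 variant is
 * ETH_HLEN + 20 + VXLAN_HLEN bytes). Length and checksum fields are left
 * zeroed for the HW to fill on transmission.
 */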
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up the ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(*out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
out:
	kfree(encap_header);
	return err;
}
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5_encap_entry *e,
					  struct net_device **out_dev)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int ttl, err;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto out;

	if (!(n->nud_state & NUD_VALID)) {
		pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
		err = -EOPNOTSUPP;
		goto out;
	}

	e->n = n;
	e->out_dev = *out_dev;

	neigh_ha_snapshot(e->h_dest, n, *out_dev);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(*out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
out:
	kfree(encap_header);
	return err;
}
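/* Called when a flow carries a tunnel_key set action, e.g. (illustrative,
 * device names hypothetical):
 *
 *	tc filter add dev eth0_0 parent ffff: protocol ip flower \
 *		action tunnel_key set id 100 src_ip 10.0.0.1 dst_ip 10.0.0.2 \
 *			dst_port 4789 \
 *		action mirred egress redirect dev vxlan0
 *
 * Finds or creates the shared encap entry for the tunnel key and links the
 * flow to it.
 */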
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *up_dev = mlx5_eswitch_get_uplink_netdev(esw);
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5_encap_entry *e;
	struct net_device *out_dev;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

	if (err)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	attr->encap_id = e->encap_id;

	return 0;

out_err:
	kfree(e);
	return err;
}
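/* E-switch actions: drop, header rewrite, vlan push/pop, tunnel encap/decap
 * and forwarding. Redirects are offloaded only between ports of the same HW
 * switch, e.g. two VF representors (illustrative, names hypothetical):
 *
 *	tc filter add dev eth0_0 parent ffff: protocol ip flower \
 *		action mirred egress redirect dev eth0_1
 */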
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	int err = 0;

	if (tc_no_actions(exts))
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	attr->in_rep = rpriv->rep;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			continue;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			int ifindex = tcf_mirred_ifindex(a);
			struct net_device *out_dev, *encap_dev = NULL;
			struct mlx5e_priv *out_priv;

			out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev)) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else if (encap) {
				err = mlx5e_attach_encap(priv, info,
							 out_dev, &encap_dev, flow);
				if (err)
					return err;
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
					MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(encap_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep = rpriv->rep;
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			continue;
		}

		if (is_tcf_vlan(a)) {
			if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
			} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
				if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
					return -EOPNOTSUPP;

				attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
				attr->vlan = tcf_vlan_push_vid(a);
			} else { /* action is TCA_VLAN_ACT_MODIFY */
				return -EOPNOTSUPP;
			}
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}
	return err;
}
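/* Entry points, called from the driver's ndo_setup_tc hook: add/replace,
 * delete, and query stats for one flower classifier instance. Flows are
 * keyed in the rhashtable by the TC filter cookie.
 */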
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
			   struct tc_cls_flower_offload *f)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags = MLX5E_TC_FLOW_ESWITCH;
		attr_size  = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags = MLX5E_TC_FLOW_NIC;
		attr_size  = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = mlx5_vzalloc(sizeof(*parse_attr));
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule)) {
		err = PTR_ERR(flow->rule);
		goto err_free;
	}

	err = rhashtable_insert_fast(&tc->ht, &flow->node,
				     tc->ht_params);
	if (err)
		goto err_del_rule;

	goto out;

err_del_rule:
	mlx5e_tc_del_flow(priv, flow);

err_free:
	kfree(flow);
out:
	kvfree(parse_attr);
	return err;
}
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_flow *flow;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5e_tc_flow *flow;
	struct tc_action *a;
	struct mlx5_fc *counter;
	LIST_HEAD(actions);
	u64 bytes;
	u64 packets;
	u64 lastuse;

	flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
				      tc->ht_params);
	if (!flow)
		return -EINVAL;

	counter = mlx5_flow_rule_counter(flow->rule);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	preempt_disable();

	tcf_exts_to_list(f->exts, &actions);
	list_for_each_entry(a, &actions, list)
		tcf_action_stats_update(a, bytes, packets, lastuse);

	preempt_enable();

	return 0;
}
static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

int mlx5e_tc_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	tc->ht_params = mlx5e_tc_flow_ht_params;
	return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = arg;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}