/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

enum {
        MLX5E_TC_FLOW_ESWITCH = BIT(0),
};

struct mlx5e_tc_flow {
        struct rhash_head       node;
        u64                     cookie;
        u8                      flags;
        struct mlx5_flow_handle *rule;
        struct list_head        encap; /* flows sharing the same encap */
        struct mlx5_esw_flow_attr *attr;
};

enum {
        MLX5_HEADER_TYPE_VXLAN = 0x0,
        MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4

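/* Sizing note (a sketch, not normative): the NIC tc table below is created
 * lazily, on the first offloaded filter, with the two constants above, so at
 * most 1024 rules are auto-grouped into 4 match-criteria groups.
 */
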
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      u32 action, u32 flow_tag)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest = { 0 };
        struct mlx5_flow_act flow_act = {
                .action = action,
                .flow_tag = flow_tag,
                .encap_id = 0,
        };
        struct mlx5_fc *counter = NULL;
        struct mlx5_flow_handle *rule;
        bool table_created = false;

        if (action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest.ft = priv->fs.vlan.ft.t;
        } else if (action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter))
                        return ERR_CAST(counter);

                dest.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest.counter = counter;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            MLX5E_TC_TABLE_NUM_ENTRIES,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            0, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        rule = ERR_CAST(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        rule = mlx5_add_flow_rules(priv->fs.tc.t, spec, &flow_act, &dest, 1);
        if (IS_ERR(rule))
                goto err_add_rule;

        return rule;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        mlx5_fc_destroy(dev, counter);

        return rule;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_fc *counter = NULL;

        if (!IS_ERR(flow->rule)) {
                counter = mlx5_flow_rule_counter(flow->rule);
                mlx5_del_flow_rules(flow->rule);
                mlx5_fc_destroy(priv->mdev, counter);
        }

        if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
}

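/* Lifecycle sketch: every handle returned by mlx5e_tc_add_nic_flow() is
 * released through mlx5e_tc_del_nic_flow(), which also frees the rule's
 * counter and, once the last filter is gone, the lazily created tc table.
 */
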
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5_flow_spec *spec,
                      struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int err;

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                return ERR_PTR(err);

        return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow);

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

        mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->attr);

        mlx5_eswitch_del_vlan_action(esw, flow->attr);

        if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
                mlx5e_detach_encap(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->encap.next;

        list_del(&flow->encap);
        if (list_empty(next)) {
                struct mlx5_encap_entry *e;

                e = list_entry(next, struct mlx5_encap_entry, flows);
                if (e->n) {
                        mlx5_encap_dealloc(priv->mdev, e->encap_id);
                        neigh_release(e->n);
                }
                hlist_del_rcu(&e->encap_hlist);
                kfree(e);
        }
}

/* we get here also when setting rule to the FW failed, etc. It means that the
 * flow rule itself might not exist, but some offloading related to the actions
 * should be cleaned.
 */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                mlx5e_tc_del_fdb_flow(priv, flow);
        else
                mlx5e_tc_del_nic_flow(priv, flow);
}

static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);

        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
                struct flow_dissector_key_keyid *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->key);
                struct flow_dissector_key_keyid *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_KEYID,
                                                  f->mask);
                MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
                         be32_to_cpu(mask->keyid));
                MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
                         be32_to_cpu(key->keyid));
        }
}

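/* Match-spec convention used throughout this file: every field is programmed
 * twice, the mask into match_criteria (headers_c/misc_c) and the value into
 * match_value (headers_v/misc_v), e.g. for the VNI above (sketch):
 *
 *      MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, 0xffffff); // mask
 *      MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, vni);      // value
 *
 * Fields whose criteria bits are zero are wildcarded.
 */
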
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);

        struct flow_dissector_key_control *enc_control =
                skb_flow_dissector_target(f->dissector,
                                          FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                          f->key);

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  f->mask);

                /* Full udp dst port must be given */
                if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
                        goto vxlan_match_offload_err;

                if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else {
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
                }

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_dport, ntohs(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_dport, ntohs(key->dst));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         udp_sport, ntohs(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
vxlan_match_offload_err:
                netdev_warn(priv->netdev,
                            "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
                return -EOPNOTSUPP;
        }

        if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  f->mask);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(key->src));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(key->dst));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
        } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
        }

        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
         */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dmac_47_16), priv->netdev->dev_addr);

        /* let software handle IP fragments */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

        return 0;
}

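/* An illustrative decap filter this parser accepts (device names and values
 * are examples only):
 *
 *      tc filter add dev vxlan0 protocol ip parent ffff: \
 *              flower enc_key_id 100 enc_dst_port 4789 enc_dst_ip 10.0.0.1 \
 *              action tunnel_key unset \
 *              action mirred egress redirect dev mlx5_rep0
 *
 * ENC_PORTS/ENC_KEYID carry the UDP dport and VNI; ENC_CONTROL selects the
 * IPv4/IPv6 outer-address keys handled above.
 */
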
static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              u8 *min_inline)
{
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        u16 addr_type = 0;
        u8 ip_proto = 0;

        *min_inline = MLX5_INLINE_MODE_L2;

        if (f->dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL))) {
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if ((dissector_uses_key(f->dissector,
                                FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
             dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
            dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  f->key);
                switch (key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f))
                                return -EOPNOTSUPP;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                /* In decap flow, header pointers should point to the inner
                 * headers, outer header were already set by parse_tunnel_attr
                 */
                headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                         inner_headers);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->key);

                struct flow_dissector_key_control *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_CONTROL,
                                                  f->mask);
                addr_type = key->addr_type;

                if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 key->flags & FLOW_DIS_IS_FRAGMENT);

                        /* the HW doesn't need L3 inline to match on frag=no */
                        if (key->flags & FLOW_DIS_IS_FRAGMENT)
                                *min_inline = MLX5_INLINE_MODE_IP;
                }
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
                struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
                ip_proto = key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(key->n_proto));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         key->ip_proto);

                if (mask->ip_proto)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
                struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                key->src);
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->key);
                struct flow_dissector_key_vlan *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_VLAN,
                                                  f->mask);
                if (mask->vlan_id || mask->vlan_priority) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
                }
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_dissector_key_ipv4_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv4_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &key->src, sizeof(key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &key->dst, sizeof(key->dst));

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_dissector_key_ipv6_addrs *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->key);
                struct flow_dissector_key_ipv6_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
                                                  f->mask);

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &mask->src, sizeof(mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &key->src, sizeof(key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &mask->dst, sizeof(mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &key->dst, sizeof(key->dst));

                if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
                    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
                        *min_inline = MLX5_INLINE_MODE_IP;
        }

        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_dissector_key_ports *key =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->key);
                struct flow_dissector_key_ports *mask =
                        skb_flow_dissector_target(f->dissector,
                                                  FLOW_DISSECTOR_KEY_PORTS,
                                                  f->mask);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }

                if (mask->src || mask->dst)
                        *min_inline = MLX5_INLINE_MODE_TCP_UDP;
        }

        return 0;
}

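/* Worked example for min_inline (sketch): a pure L2 filter (MAC match only)
 * leaves *min_inline at MLX5_INLINE_MODE_L2; matching on IP addresses,
 * ip_proto or fragments raises it to MLX5_INLINE_MODE_IP; matching on L4
 * ports raises it to MLX5_INLINE_MODE_TCP_UDP. parse_cls_flower() below then
 * rejects the flow if this exceeds esw->offloads.inline_mode.
 */
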
static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5e_tc_flow *flow,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
{
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5_eswitch_rep *rep = priv->ppriv;
        u8 min_inline;
        int err;

        err = __parse_cls_flower(priv, spec, f, &min_inline);

        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
            rep->vport != FDB_UPLINK_VPORT) {
                if (min_inline > esw->offloads.inline_mode) {
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    min_inline, esw->offloads.inline_mode);
                        return -EOPNOTSUPP;
                }
        }

        return err;
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                u32 *action, u32 *flow_tag)
{
        const struct tc_action *a;
        LIST_HEAD(actions);

        if (tc_no_actions(exts))
                return -EINVAL;

        *flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
        *action = 0;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                /* Only support a single action per rule */
                if (*action)
                        return -EINVAL;

                if (is_tcf_gact_shot(a)) {
                        *action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
                        if (MLX5_CAP_FLOWTABLE(priv->mdev,
                                               flow_table_properties_nic_receive.flow_counter))
                                *action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_skbedit_mark(a)) {
                        u32 mark = tcf_skbedit_mark(a);

                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
                                netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
                                            mark);
                                return -EINVAL;
                        }

                        *flow_tag = mark;
                        *action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
                        continue;
                }

                return -EINVAL;
        }

        return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
                                 struct ip_tunnel_key *b)
{
        return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
        return jhash(key, sizeof(*key), 0);
}

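/* Usage sketch: the pair above backs the e-switch encap table lookup done in
 * mlx5e_attach_encap() below, roughly:
 *
 *      hash_key = hash_encap_info(key);
 *      hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
 *                                 encap_hlist, hash_key)
 *              if (!cmp_encap_info(&e->tun_info.key, key))
 *                      break;  // reuse this encap entry
 */
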
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct flowi4 *fl4,
                                   struct neighbour **out_n,
                                   int *out_ttl)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct rtable *rt;
        struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
        int ret;

        rt = ip_route_output_key(dev_net(mirred_dev), fl4);
        ret = PTR_ERR_OR_ZERO(rt);
        if (ret)
                return ret;
#else
        return -EOPNOTSUPP;
#endif
        /* if the egress device isn't on the same HW e-switch, we use the uplink */
        if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
                *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
        else
                *out_dev = rt->dst.dev;

        *out_ttl = ip4_dst_hoplimit(&rt->dst);
        n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
        ip_rt_put(rt);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
                                   struct net_device *mirred_dev,
                                   struct net_device **out_dev,
                                   struct flowi6 *fl6,
                                   struct neighbour **out_n,
                                   int *out_ttl)
{
        struct neighbour *n = NULL;
        struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int ret;

        dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
        ret = dst->error;
        if (ret) {
                dst_release(dst);
                return ret;
        }

        *out_ttl = ip6_dst_hoplimit(dst);

        /* if the egress device isn't on the same HW e-switch, we use the uplink */
        if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
                *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
        else
                *out_dev = dst->dev;
#else
        return -EOPNOTSUPP;
#endif

        n = dst_neigh_lookup(dst, &fl6->daddr);
        dst_release(dst);
        if (!n)
                return -ENOMEM;

        *out_n = n;
        return 0;
}

static int gen_vxlan_header_ipv4(struct net_device *out_dev,
                                 char buf[],
                                 unsigned char h_dest[ETH_ALEN],
                                 int ttl,
                                 __be32 daddr,
                                 __be32 saddr,
                                 __be16 udp_dst_port,
                                 __be32 vx_vni)
{
        int encap_size = VXLAN_HLEN + sizeof(struct iphdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
        struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

        memset(buf, 0, encap_size);

        ether_addr_copy(eth->h_dest, h_dest);
        ether_addr_copy(eth->h_source, out_dev->dev_addr);
        eth->h_proto = htons(ETH_P_IP);

        ip->daddr = daddr;
        ip->saddr = saddr;

        ip->ttl = ttl;
        ip->protocol = IPPROTO_UDP;
        ip->version = 0x4;
        ip->ihl = 0x5;

        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);

        return encap_size;
}

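/* Size check (sketch): VXLAN_HLEN covers the UDP plus VXLAN headers
 * (8 + 8 bytes), so the IPv4 header above is 14 (ETH) + 20 (IP) + 16 = 50
 * bytes and the IPv6 one below is 14 + 40 + 16 = 70 bytes; both must fit in
 * MLX5_CAP_ESW(mdev, max_encap_header_size).
 */
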
static int gen_vxlan_header_ipv6(struct net_device *out_dev,
                                 char buf[],
                                 unsigned char h_dest[ETH_ALEN],
                                 int ttl,
                                 struct in6_addr *daddr,
                                 struct in6_addr *saddr,
                                 __be16 udp_dst_port,
                                 __be32 vx_vni)
{
        int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
        struct ethhdr *eth = (struct ethhdr *)buf;
        struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
        struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
        struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

        memset(buf, 0, encap_size);

        ether_addr_copy(eth->h_dest, h_dest);
        ether_addr_copy(eth->h_source, out_dev->dev_addr);
        eth->h_proto = htons(ETH_P_IPV6);

        ip6_flow_hdr(ip6h, 0, 0);
        /* the HW fills up ipv6 payload len */
        ip6h->nexthdr = IPPROTO_UDP;
        ip6h->hop_limit = ttl;
        ip6h->daddr = *daddr;
        ip6h->saddr = *saddr;

        udp->dest = udp_dst_port;
        vxh->vx_flags = VXLAN_HF_VNI;
        vxh->vx_vni = vxlan_vni_field(vx_vni);

        return encap_size;
}

static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                                          struct net_device *mirred_dev,
                                          struct mlx5_encap_entry *e,
                                          struct net_device **out_dev)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        int encap_size, ttl, err;
        struct neighbour *n = NULL;
        struct flowi4 fl4 = {};
        char *encap_header;

        encap_header = kzalloc(max_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.fl4_dport = tun_key->tp_dst;
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }

        fl4.flowi4_tos = tun_key->tos;
        fl4.daddr = tun_key->u.ipv4.dst;
        fl4.saddr = tun_key->u.ipv4.src;

        err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
                                      &fl4, &n, &ttl);
        if (err)
                goto out;

        if (!(n->nud_state & NUD_VALID)) {
                pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
                err = -EOPNOTSUPP;
                goto out;
        }

        e->n = n;
        e->out_dev = *out_dev;

        neigh_ha_snapshot(e->h_dest, n, *out_dev);

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
                                                   e->h_dest, ttl,
                                                   fl4.daddr,
                                                   fl4.saddr, tun_key->tp_dst,
                                                   tunnel_id_to_key32(tun_key->tun_id));
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }

        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               encap_size, encap_header, &e->encap_id);
out:
        kfree(encap_header);
        return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
                                          struct net_device *mirred_dev,
                                          struct mlx5_encap_entry *e,
                                          struct net_device **out_dev)
{
        int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
        struct ip_tunnel_key *tun_key = &e->tun_info.key;
        int encap_size, err, ttl = 0;
        struct neighbour *n = NULL;
        struct flowi6 fl6 = {};
        char *encap_header;

        encap_header = kzalloc(max_encap_size, GFP_KERNEL);
        if (!encap_header)
                return -ENOMEM;

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                fl6.flowi6_proto = IPPROTO_UDP;
                fl6.fl6_dport = tun_key->tp_dst;
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }

        fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
        fl6.daddr = tun_key->u.ipv6.dst;
        fl6.saddr = tun_key->u.ipv6.src;

        err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
                                      &fl6, &n, &ttl);
        if (err)
                goto out;

        if (!(n->nud_state & NUD_VALID)) {
                pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
                err = -EOPNOTSUPP;
                goto out;
        }

        e->n = n;
        e->out_dev = *out_dev;

        neigh_ha_snapshot(e->h_dest, n, *out_dev);

        switch (e->tunnel_type) {
        case MLX5_HEADER_TYPE_VXLAN:
                encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
                                                   e->h_dest, ttl,
                                                   &fl6.daddr,
                                                   &fl6.saddr, tun_key->tp_dst,
                                                   tunnel_id_to_key32(tun_key->tun_id));
                break;
        default:
                err = -EOPNOTSUPP;
                goto out;
        }

        err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
                               encap_size, encap_header, &e->encap_id);
out:
        kfree(encap_header);
        return err;
}

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
        struct ip_tunnel_key *key = &tun_info->key;
        struct mlx5_encap_entry *e;
        struct net_device *out_dev;
        int tunnel_type, err = -EOPNOTSUPP;
        uintptr_t hash_key;
        bool found = false;

        /* udp dst port must be set */
        if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
                goto vxlan_encap_offload_err;

        /* setting udp src port isn't supported */
        if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
                netdev_warn(priv->netdev,
                            "must set udp dst port and not set udp src port\n");
                return -EOPNOTSUPP;
        }

        if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
                tunnel_type = MLX5_HEADER_TYPE_VXLAN;
        } else {
                netdev_warn(priv->netdev,
                            "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
                return -EOPNOTSUPP;
        }

        hash_key = hash_encap_info(key);

        hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
                                   encap_hlist, hash_key) {
                if (!cmp_encap_info(&e->tun_info.key, key)) {
                        found = true;
                        break;
                }
        }

        if (found) {
                attr->encap = e;
                return 0;
        }

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->tun_info = *tun_info;
        e->tunnel_type = tunnel_type;
        INIT_LIST_HEAD(&e->flows);

        if (family == AF_INET)
                err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
        else if (family == AF_INET6)
                err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);

        if (err)
                goto out_err;

        attr->encap = e;
        hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

        return err;

out_err:
        kfree(e);
        return err;
}

static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow *flow)
{
        struct mlx5_esw_flow_attr *attr = flow->attr;
        struct ip_tunnel_info *info = NULL;
        const struct tc_action *a;
        LIST_HEAD(actions);
        bool encap = false;
        int err;

        if (tc_no_actions(exts))
                return -EINVAL;

        memset(attr, 0, sizeof(*attr));
        attr->in_rep = priv->ppriv;

        tcf_exts_to_list(exts, &actions);
        list_for_each_entry(a, &actions, list) {
                if (is_tcf_gact_shot(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        continue;
                }

                if (is_tcf_mirred_egress_redirect(a)) {
                        int ifindex = tcf_mirred_ifindex(a);
                        struct net_device *out_dev;
                        struct mlx5e_priv *out_priv;

                        out_dev = __dev_get_by_index(dev_net(priv->netdev), ifindex);

                        if (switchdev_port_same_parent_id(priv->netdev,
                                                          out_dev)) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else if (encap) {
                                err = mlx5e_attach_encap(priv, info,
                                                         out_dev, attr);
                                if (err)
                                        return err;
                                list_add(&flow->encap, &attr->encap->flows);
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
                                        MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                        MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                out_priv = netdev_priv(attr->encap->out_dev);
                                attr->out_rep = out_priv->ppriv;
                        } else {
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
                        }
                        continue;
                }

                if (is_tcf_tunnel_set(a)) {
                        info = tcf_tunnel_info(a);
                        if (info)
                                encap = true;
                        else
                                return -EOPNOTSUPP;
                        continue;
                }

                if (is_tcf_vlan(a)) {
                        if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
                        } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
                                if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q))
                                        return -EOPNOTSUPP;

                                attr->action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
                                attr->vlan = tcf_vlan_push_vid(a);
                        } else { /* action is TCA_VLAN_ACT_MODIFY */
                                return -EOPNOTSUPP;
                        }
                        continue;
                }

                if (is_tcf_tunnel_release(a)) {
                        attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
                        continue;
                }

                return -EINVAL;
        }
        return 0;
}

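/* An illustrative encap-side counterpart to the decap example above (device
 * names and values are examples only):
 *
 *      tc filter add dev mlx5_rep0 protocol ip parent ffff: \
 *              flower ip_proto tcp dst_port 80 \
 *              action tunnel_key set id 100 \
 *                      src_ip 10.0.0.2 dst_ip 10.0.0.1 dst_port 4789 \
 *              action mirred egress redirect dev vxlan0
 *
 * tunnel_key set arrives here as is_tcf_tunnel_set(); the redirect to the
 * vxlan device then takes the encap branch of the mirred handling above.
 */
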
int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
                           struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        int err, attr_size = 0;
        u32 flow_tag, action;
        struct mlx5e_tc_flow *flow;
        struct mlx5_flow_spec *spec;
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        u8 flow_flags = 0;

        if (esw && esw->mode == SRIOV_OFFLOADS) {
                flow_flags = MLX5E_TC_FLOW_ESWITCH;
                attr_size = sizeof(struct mlx5_esw_flow_attr);
        }

        flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
        spec = mlx5_vzalloc(sizeof(*spec));
        if (!spec || !flow) {
                err = -ENOMEM;
                goto err_free;
        }

        flow->cookie = f->cookie;
        flow->flags = flow_flags;

        err = parse_cls_flower(priv, flow, spec, f);
        if (err < 0)
                goto err_free;

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
                err = parse_tc_fdb_actions(priv, f->exts, flow);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_fdb_flow(priv, spec, flow->attr);
        } else {
                err = parse_tc_nic_actions(priv, f->exts, &action, &flow_tag);
                if (err < 0)
                        goto err_free;
                flow->rule = mlx5e_tc_add_nic_flow(priv, spec, action, flow_tag);
        }

        if (IS_ERR(flow->rule)) {
                err = PTR_ERR(flow->rule);
                goto err_del_rule;
        }

        err = rhashtable_insert_fast(&tc->ht, &flow->node,
                                     tc->ht_params);
        if (err)
                goto err_del_rule;

        goto out;

err_del_rule:
        mlx5e_tc_del_flow(priv, flow);

err_free:
        kfree(flow);
out:
        kvfree(spec);
        return err;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
                        struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_flow *flow;
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        rhashtable_remove_fast(&tc->ht, &flow->node, tc->ht_params);

        mlx5e_tc_del_flow(priv, flow);

        kfree(flow);

        return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
                       struct tc_cls_flower_offload *f)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5e_tc_flow *flow;
        struct tc_action *a;
        struct mlx5_fc *counter;
        LIST_HEAD(actions);
        u64 bytes;
        u64 packets;
        u64 lastuse;

        flow = rhashtable_lookup_fast(&tc->ht, &f->cookie,
                                      tc->ht_params);
        if (!flow)
                return -EINVAL;

        counter = mlx5_flow_rule_counter(flow->rule);
        if (!counter)
                return 0;

        mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

        preempt_disable();

        tcf_exts_to_list(f->exts, &actions);
        list_for_each_entry(a, &actions, list)
                tcf_action_stats_update(a, bytes, packets, lastuse);

        preempt_enable();

        return 0;
}

static const struct rhashtable_params mlx5e_tc_flow_ht_params = {
        .head_offset = offsetof(struct mlx5e_tc_flow, node),
        .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
        .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
        .automatic_shrinking = true,
};

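/* The hashtable is keyed by the stack's filter cookie, so the delete/stats
 * paths above find a flow with a single lookup (sketch):
 *
 *      flow = rhashtable_lookup_fast(&tc->ht, &f->cookie, tc->ht_params);
 */
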
int mlx5e_tc_init(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        tc->ht_params = mlx5e_tc_flow_ht_params;
        return rhashtable_init(&tc->ht, &tc->ht_params);
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
        struct mlx5e_tc_flow *flow = ptr;
        struct mlx5e_priv *priv = arg;

        mlx5e_tc_del_flow(priv, flow);
        kfree(flow);
}

void mlx5e_tc_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5e_tc_table *tc = &priv->fs.tc;

        rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, priv);

        if (!IS_ERR_OR_NULL(tc->t)) {
                mlx5_destroy_flow_table(tc->t);
                tc->t = NULL;
        }
}