/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en/tc/post_act.h"
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "en/tc_tun_encap.h"
#include "en/tc/sample.h"
#include "en/tc/act/act.h"
#include "en/tc/post_meter.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>
#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)
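/* Geometry of the autogrouped NIC TC tables (an assumption based on the macro
 * names; the code that consumes them is not part of this excerpt): at most
 * MLX5E_TC_TABLE_NUM_GROUPS flow groups are carved per table, with a group
 * holding up to MLX5E_TC_TABLE_MAX_GROUP_SIZE (2^18) flow entries.
 */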
struct mlx5e_tc_table {
	/* Protects the dynamic assignment of the t parameter
	 * which is the nic tc root table.
	 */
	struct mutex t_lock;
	struct mlx5e_priv *priv;
	struct mlx5_flow_table *t;
	struct mlx5_flow_table *miss_t;
	struct mlx5_fs_chains *chains;
	struct mlx5e_post_act *post_act;

	struct mod_hdr_tbl mod_hdr;
	struct mutex hairpin_tbl_lock; /* protects hairpin_tbl */
	DECLARE_HASHTABLE(hairpin_tbl, 8);

	struct notifier_block netdevice_nb;
	struct netdev_net_notifier netdevice_nn;

	struct mlx5_tc_ct_priv *ct;
	struct mapping_ctx *mapping;
};
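/* The table below maps SW flow attributes (chain id, tunnel id, CT state and
 * friends) onto the HW metadata registers that carry them across steering
 * domains and back to SW on miss: reg_c_0/reg_c_1 for FDB rules, and reg_b
 * for NIC rules (see the NIC_CHAIN_TO_REG comment further down).
 */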
struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.mlen = ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS,
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
	[PACKET_COLOR_TO_REG] = packet_color_to_reg,
};
struct mlx5e_tc_table *mlx5e_tc_table_alloc(void)
{
	struct mlx5e_tc_table *tc;

	tc = kvzalloc(sizeof(*tc), GFP_KERNEL);
	return tc ? tc : ERR_PTR(-ENOMEM);
}

void mlx5e_tc_table_free(struct mlx5e_tc_table *tc)
{
	kvfree(tc);
}

struct mlx5_fs_chains *mlx5e_nic_chains(struct mlx5e_tc_table *tc)
{
	return tc->chains;
}
/* To avoid a false lockdep dependency warning, give tc_ht a lock class
 * different from that of the flow-group hash tables: when the last flow in a
 * group is deleted and the group itself is then deleted, we get into
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash.
 * That takes the ftes_hash ht->mutex, which is a different mutex than the
 * tc_ht one here.
 */
static struct lock_class_key tc_ht_lock_key;
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);
static void free_flow_post_acts(struct mlx5e_tc_flow *flow);
void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 val,
			    u32 mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	/* move the new value and mask to the correct offset */
	WARN_ON(mask > max_mask);
	mask <<= moffset;
	val <<= moffset;
	max_mask <<= moffset;

	/* zero out the field's previous value and mask */
	curr_mask &= ~max_mask;
	curr_val &= ~max_mask;

	/* add the current match to the mask */
	curr_mask |= mask;
	curr_val |= val;

	/* back to be32 and write */
	curr_mask_be = cpu_to_be32(curr_mask);
	curr_val_be = cpu_to_be32(curr_val);

	memcpy(fmask, &curr_mask_be, 4);
	memcpy(fval, &curr_val_be, 4);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
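/* Worked example with hypothetical mapping values: for moffset = 8 and
 * mlen = 3, max_mask = GENMASK(2, 0) = 0x7. Calling the helper above with
 * val = 0x5, mask = 0x7 clears bits 10:8 of the current dword match and ORs
 * in 0x500/0x700, leaving the match on the register's other bits intact.
 */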
void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *val,
				u32 *mask)
{
	void *headers_c = spec->match_criteria, *headers_v = spec->match_value, *fmask, *fval;
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	u32 max_mask = GENMASK(match_len - 1, 0);
	__be32 curr_mask_be, curr_val_be;
	u32 curr_mask, curr_val;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(&curr_mask_be, fmask, 4);
	memcpy(&curr_val_be, fval, 4);

	curr_mask = be32_to_cpu(curr_mask_be);
	curr_val = be32_to_cpu(curr_val_be);

	*mask = (curr_mask >> moffset) & max_mask;
	*val = (curr_val >> moffset) & max_mask;
}
int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	modact = mlx5e_mod_hdr_alloc(mdev, ns, mod_hdr_acts);
	if (IS_ERR(modact))
		return PTR_ERR(modact);

	/* The firmware length field is 5 bits wide, where 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
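/* Note that the value returned above is the index of the newly filled
 * set_action_in entry (num_actions before the increment), so a caller can
 * later rewrite that action in place via
 * mlx5e_tc_match_to_reg_mod_hdr_change().
 */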
struct mlx5e_tc_int_port_priv *
mlx5e_get_int_port_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->int_port_priv;
	}

	return NULL;
}

struct mlx5e_flow_meters *
mlx5e_get_flow_meters(struct mlx5_core_dev *dev)
{
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5e_priv *priv;

	if (is_mdev_switchdev_mode(dev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;
		priv = netdev_priv(uplink_rpriv->netdev);
		if (!uplink_priv->flow_meters)
			uplink_priv->flow_meters =
				mlx5e_flow_meters_init(priv,
						       MLX5_FLOW_NAMESPACE_FDB,
						       uplink_priv->post_act);
		if (!IS_ERR(uplink_priv->flow_meters))
			return uplink_priv->flow_meters;
	}

	return NULL;
}
static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return tc->ct;
}

static struct mlx5e_tc_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->tc_psample;
	}

	return NULL;
}

static struct mlx5e_post_act *
get_post_action(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->post_act;
	}

	return tc->post_act;
}
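/* The getters above share one pattern: in switchdev mode the resource lives
 * in the uplink representor's uplink_priv; otherwise the NIC TC instance
 * (tc->ct, tc->post_act) is used, or NULL when there is no NIC equivalent.
 */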
struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

static bool
is_flow_meter_action(struct mlx5_flow_attr *attr)
{
	return (attr->action & MLX5_FLOW_CONTEXT_ACTION_EXECUTE_ASO) &&
	       (attr->exe_aso_type == MLX5_EXE_ASO_FLOW_METER);
}
static int
mlx5e_tc_add_flow_meter(struct mlx5e_priv *priv,
			struct mlx5_flow_attr *attr)
{
	struct mlx5e_post_act *post_act = get_post_action(priv);
	struct mlx5e_post_meter_priv *post_meter;
	enum mlx5_flow_namespace_type ns_type;
	struct mlx5e_flow_meter_handle *meter;

	meter = mlx5e_tc_meter_replace(priv->mdev, &attr->meter_attr.params);
	if (IS_ERR(meter)) {
		mlx5_core_err(priv->mdev, "Failed to get flow meter\n");
		return PTR_ERR(meter);
	}

	ns_type = mlx5e_tc_meter_get_namespace(meter->flow_meters);
	post_meter = mlx5e_post_meter_init(priv, ns_type, post_act, meter->green_counter,
					   meter->red_counter);
	if (IS_ERR(post_meter)) {
		mlx5_core_err(priv->mdev, "Failed to init post meter\n");
		goto err_meter_init;
	}

	attr->meter_attr.meter = meter;
	attr->meter_attr.post_meter = post_meter;
	attr->dest_ft = mlx5e_post_meter_get_ft(post_meter);
	attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

	return 0;

err_meter_init:
	mlx5e_tc_meter_put(meter);
	return PTR_ERR(post_meter);
}

static void
mlx5e_tc_del_flow_meter(struct mlx5_flow_attr *attr)
{
	mlx5e_post_meter_cleanup(attr->meter_attr.post_meter);
	mlx5e_tc_meter_put(attr->meter_attr.meter);
}
struct mlx5_flow_handle *
mlx5e_tc_rule_offload(struct mlx5e_priv *priv,
		      struct mlx5_flow_spec *spec,
		      struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int err;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts =
			&attr->parse_attr->mod_hdr_acts;

		return mlx5_tc_ct_flow_offload(get_ct_priv(priv),
					       spec, attr,
					       mod_hdr_acts);
	}

	if (!is_mdev_switchdev_mode(priv->mdev))
		return mlx5e_add_offloaded_nic_rule(priv, spec, attr);

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE)
		return mlx5e_tc_sample_offload(get_sample_priv(priv), spec, attr);

	if (is_flow_meter_action(attr)) {
		err = mlx5e_tc_add_flow_meter(priv, attr);
		if (err)
			return ERR_PTR(err);
	}

	return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
}

void
mlx5e_tc_rule_unoffload(struct mlx5e_priv *priv,
			struct mlx5_flow_handle *rule,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (attr->flags & MLX5_ATTR_FLAG_CT) {
		mlx5_tc_ct_delete_flow(get_ct_priv(priv), attr);
		return;
	}

	if (!is_mdev_switchdev_mode(priv->mdev)) {
		mlx5e_del_offloaded_nic_rule(priv, rule, attr);
		return;
	}

	if (attr->flags & MLX5_ATTR_FLAG_SAMPLE) {
		mlx5e_tc_sample_unoffload(get_sample_priv(priv), rule, attr);
		return;
	}

	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

	if (attr->meter_attr.meter)
		mlx5e_tc_del_flow_meter(attr);
}
int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
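/* Usage sketch (the values are hypothetical; the call shape mirrors the
 * slow-path code further down): stamp chain id 5 into the chain register
 * while building a modify-header sequence for an FDB rule,
 *
 *	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts,
 *					MLX5_FLOW_NAMESPACE_FDB,
 *					CHAIN_TO_REG, 5);
 *
 * and match it back on the restore path with
 * mlx5e_tc_match_to_reg_match(spec, CHAIN_TO_REG, 5, mask).
 */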
void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mlx5e_mod_hdr_get_item(mod_hdr_acts, act_id);

	/* The firmware length field is 5 bits wide, where 0 means 32 bits */
	if (mlen == 32)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset);
	MLX5_SET(set_action_in, modact, length, mlen);
	MLX5_SET(set_action_in, modact, data, data);
}
struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	struct mlx5e_tir direct_tir;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS];
	struct mlx5_ttc_table *ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpes that were not yet fully initialized when the dead-peer update
	 * event handler traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);

	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}
bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

int mlx5e_get_flow_namespace(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return mlx5e_get_flow_namespace(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&tc->mod_hdr;
}
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  mlx5e_get_flow_namespace(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}
static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct mlx5_core_dev *mdev;
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = dev_get_by_index(net, ifindex);
	if (!netdev)
		return ERR_PTR(-ENODEV);

	priv = netdev_priv(netdev);
	mdev = priv->mdev;
	dev_put(netdev);

	/* Mirred tc action holds a refcount on the ifindex net_device (see
	 * net/sched/act_mirred.c:tcf_mirred_get_dev). So, it's okay to continue using mdev
	 * after dev_put(netdev), while we're in the context of adding a tc flow.
	 *
	 * The mdev pointer corresponds to the peer/out net_device of a hairpin. It is then
	 * stored in a hairpin object, which exists until all flows, that refer to it, get
	 * removed.
	 *
	 * On the other hand, after a hairpin object has been created, the peer net_device may
	 * be removed/unbound while there are still some hairpin flows that are using it. This
	 * case is handled by mlx5e_tc_hairpin_update_dead_peer, which is hooked to the
	 * NETDEV_UNREGISTER event of the peer net_device.
	 */
	return mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	struct mlx5e_tir_builder *builder;
	int err;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto out;

	mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]);
	err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false);
	if (err)
		goto create_tir_err;

out:
	mlx5e_tir_builder_free(builder);
	return err;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	goto out;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5e_tir_destroy(&hp->direct_tir);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_rss_params_indir *indir;
	int err;

	indir = kvmalloc(sizeof(*indir), GFP_KERNEL);
	if (!indir)
		return -ENOMEM;

	mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels);
	err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels,
				   mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc,
				   indir);

	kvfree(indir);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5e_rss_params_hash rss_hash;
	enum mlx5_traffic_types tt, max_tt;
	struct mlx5e_tir_builder *builder;
	int err = 0;

	builder = mlx5e_tir_builder_alloc(false);
	if (!builder)
		return -ENOMEM;

	rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_rss_params_traffic_type rss_tt;

		rss_tt = mlx5e_rss_get_default_tt_config(tt);

		mlx5e_tir_builder_build_rqt(builder, hp->tdn,
					    mlx5e_rqt_get_rqtn(&hp->indir_rqt),
					    false);
		mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);

		err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}

		mlx5e_tir_builder_clear(builder);
	}

out:
	mlx5e_tir_builder_free(builder);
	return err;

err_destroy_tirs:
	max_tt = tt;
	for (tt = 0; tt < max_tt; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);

	goto out;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5e_tir_destroy(&hp->indir_tir[tt]);
}
static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev,
						 MLX5_FLOW_NAMESPACE_KERNEL);
	for (tt = 0; tt < MLX5_NUM_TT; tt++) {
		ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		ttc_params->dests[tt].tir_num =
			tt == MLX5_TT_ANY ?
				mlx5e_tir_get_tirn(&hp->direct_tir) :
				mlx5e_tir_get_tirn(&hp->indir_tir[tt]);
	}

	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}
static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	struct mlx5_ttc_table *ttc;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params);
	if (IS_ERR(hp->ttc)) {
		err = PTR_ERR(hp->ttc);
		goto err_create_ttc_table;
	}

	ttc = mlx5e_fs_get_ttc(priv->fs, false);
	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels,
		   mlx5_get_ttc_flow_table(ttc)->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_rqt_destroy(&hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	mlx5_destroy_ttc_table(hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_rqt_destroy(&hp->indir_rqt);
}
static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		err = PTR_ERR(peer_mdev);
		goto create_pair_err;
	}

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
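/* The hash key packs the peer's vhca_id into the high 16 bits and the PCP
 * priority into the low bits, so one hairpin pair is shared by all flows
 * targeting the same (peer device, priority) tuple.
 */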
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(tc->hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &tc->hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&tc->hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (IS_ERR(peer_mdev)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid ifindex of mirred device");
		return PTR_ERR(peer_mdev);
	}

	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&tc->hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&tc->hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(tc->hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&tc->hairpin_tbl_lock);

	params.log_data_size = 16;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set one hairpin pair per each 50Gbps share of the link speed */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft =
			mlx5_get_ttc_flow_table(hpe->hp->ttc);
	} else {
		flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir);
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}
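/* Example for the channel computation above: on a 100Gbps port,
 * link_speed64 = max(100000, 50000) / 50000 = 2, so the hairpin pair is
 * created with two channels and the RSS (TTC table) path is used to spread
 * traffic between them.
 */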
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}
struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5e_vlan_table *vlan = mlx5e_fs_get_vlan(priv->fs);
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_fs_chains *nic_chains;
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	nic_chains = mlx5e_nic_chains(tc);
	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(tc->t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}
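/* Destination precedence in the function above: an explicit dest_ft (e.g. a
 * post-meter table) wins, then the hairpin destinations (TTC table for RSS,
 * direct TIR otherwise), and only then a plain FWD_DEST to a goto chain or
 * the VLAN table; a counter destination is appended on top of whichever was
 * picked.
 */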
static int
alloc_flow_attr_counter(struct mlx5_core_dev *counter_dev,
			struct mlx5_flow_attr *attr)
{
	struct mlx5_fc *counter;

	counter = mlx5_fc_create(counter_dev, true);
	if (IS_ERR(counter))
		return PTR_ERR(counter);

	attr->counter = counter;

	return 0;
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	int err;

	parse_attr = attr->parse_attr;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(dev, attr);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}
void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_fs_chains *nic_chains;

	nic_chains = mlx5e_nic_chains(tc);
	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_attr *attr = flow->attr;

	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_CT)
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove the root table if no rules are left, to avoid
	 * extra steering hops.
	 */
	mutex_lock(&tc->t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(mlx5e_nic_chains(tc), 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_unlock(&tc->t_lock);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	free_flow_post_acts(flow);

	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	rule = mlx5e_tc_rule_offload(flow->priv, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1]))
			goto err_rule1;
	}

	return rule;

err_rule1:
	mlx5e_tc_rule_unoffload(flow->priv, rule, attr);
	return flow->rule[1];
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5e_tc_rule_unoffload(flow->priv, flow->rule[0], attr);
}
struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5e_tc_mod_hdr_acts mod_acts = {};
	struct mlx5e_mod_hdr_handle *mh = NULL;
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;
	bool fwd_and_modify_cap;
	u32 chain_mapping = 0;
	int err;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;

	fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table);
	if (!fwd_and_modify_cap)
		goto skip_restore;

	err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping);
	if (err)
		goto err_get_chain;

	err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB,
					CHAIN_TO_REG, chain_mapping);
	if (err)
		goto err_reg_set;

	mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow),
				  MLX5_FLOW_NAMESPACE_FDB, &mod_acts);
	if (IS_ERR(mh)) {
		err = PTR_ERR(mh);
		goto err_attach;
	}

	slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh);

skip_restore:
	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (IS_ERR(rule)) {
		err = PTR_ERR(rule);
		goto err_offload;
	}

	flow->slow_mh = mh;
	flow->chain_mapping = chain_mapping;
	flow_flag_set(flow, SLOW);

	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);

	return rule;

err_offload:
	if (fwd_and_modify_cap)
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh);
err_attach:
err_reg_set:
	if (fwd_and_modify_cap)
		mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping);
err_get_chain:
	mlx5e_mod_hdr_dealloc(&mod_acts);
	kfree(slow_attr);
	return ERR_PTR(err);
}
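/* The chain id is stamped into CHAIN_TO_REG before jumping to slow path so
 * that, when the packet eventually misses to software, the metadata still
 * identifies the tc chain it should resume from; devices without the
 * fdb_modify_header_fwd_to_table capability skip this restore step entirely.
 */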
void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH;
	if (flow->slow_mh) {
		slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh);
	}
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	if (flow->slow_mh) {
		mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh);
		mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping);
		flow->chain_mapping = 0;
		flow->slow_mh = NULL;
	}
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}
/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}
bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF)
		return false;

	if (route_mdev->coredev_type != MLX5_COREDEV_VF &&
	    route_mdev->coredev_type != MLX5_COREDEV_SF)
		return false;

	return mlx5e_same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_devcom *devcom = NULL;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	if (mlx5_lag_is_active(out_priv->mdev)) {
		/* In the lag case we may get devices from different eswitch
		 * instances. If resolving the vport number fails, it most
		 * likely means we are on the wrong eswitch, so retry on the
		 * peer eswitch.
		 */
		err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
		if (err != -ENOENT)
			return err;

		devcom = out_priv->mdev->priv.devcom;
		esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
		if (!esw)
			return -ENODEV;
	}

	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);
	if (devcom)
		mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	return err;
}
int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   mlx5e_get_flow_namespace(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(attr->modify_hdr);
	attr->modify_hdr = mod_hdr;

	return 0;
}
static int
set_encap_dests(struct mlx5e_priv *priv,
		struct mlx5e_tc_flow *flow,
		struct mlx5_flow_attr *attr,
		struct netlink_ext_ack *extack,
		bool *encap_valid,
		bool *vf_tun)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_esw_flow_attr *esw_attr;
	struct net_device *encap_dev = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int out_index;
	int err = 0;

	if (!mlx5e_is_eswitch_flow(flow))
		return 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;
	*vf_tun = false;
	*encap_valid = true;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		struct net_device *out_dev;
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = dev_get_by_index(dev_net(priv->netdev), mirred_ifindex);
		if (!out_dev) {
			NL_SET_ERR_MSG_MOD(extack, "Requested mirred device not found");
			err = -ENODEV;
			goto out;
		}
		err = mlx5e_attach_encap(priv, flow, attr, out_dev, out_index,
					 extack, &encap_dev, encap_valid);
		dev_put(out_dev);
		if (err)
			goto out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	if (*vf_tun && esw_attr->out_count > 1) {
		NL_SET_ERR_MSG_MOD(extack, "VF tunnel encap with mirroring is not supported");
		err = -EOPNOTSUPP;
		goto out;
	}

out:
	return err;
}

static void
clean_encap_dests(struct mlx5e_priv *priv,
		  struct mlx5e_tc_flow *flow,
		  struct mlx5_flow_attr *attr,
		  bool *vf_tun)
{
	struct mlx5_esw_flow_attr *esw_attr;
	int out_index;

	if (!mlx5e_is_eswitch_flow(flow))
		return;

	esw_attr = attr->esw_attr;
	*vf_tun = false;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE &&
		    !esw_attr->dest_int_port)
			*vf_tun = true;

		mlx5e_detach_encap(priv, flow, attr, out_index);
		kfree(attr->parse_attr->tun_info[out_index]);
	}
}
static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun, encap_valid;
	u32 max_prio, max_chain;
	int err = 0;

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;

		if (!attr->chain && esw_attr->int_port &&
		    attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			/* If decap route device is internal port, change the
			 * source vport value in reg_c0 back to uplink just in
			 * case the rule performs goto chain > 0. If we have a miss
			 * on chain > 0 we want the metadata regs to hold the
			 * chain id so SW will resume handling of this packet
			 * from the proper chain.
			 */
			u32 metadata = mlx5_eswitch_get_vport_metadata_for_set(esw,
									       esw_attr->in_rep->vport);

			err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
							MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
							metadata);
			if (err)
				goto err_out;

			attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
		}
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	if (netif_is_ovs_master(parse_attr->filter_dev)) {
		struct mlx5e_tc_int_port *int_port;

		if (attr->chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule is only supported on chain 0");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (attr->dest_chain) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Internal port rule offload doesn't support goto action");
			err = -EOPNOTSUPP;
			goto err_out;
		}

		int_port = mlx5e_tc_int_port_get(mlx5e_get_int_port_priv(priv),
						 parse_attr->filter_dev->ifindex,
						 flow_flag_test(flow, EGRESS) ?
						 MLX5E_TC_INT_PORT_EGRESS :
						 MLX5E_TC_INT_PORT_INGRESS);
		if (IS_ERR(int_port)) {
			err = PTR_ERR(int_port);
			goto err_out;
		}

		esw_attr->int_port = int_port;
	}

	err = set_encap_dests(priv, flow, attr, extack, &encap_valid, &vf_tun);
	if (err)
		goto err_out;

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, flow, attr);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		err = alloc_flow_attr_counter(esw_attr->counter_dev, attr);
		if (err)
			goto err_out;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have a valid neighbor yet
	 */
	if (!encap_valid || flow_flag_test(flow, SLOW))
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}
static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}
static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}
	complete_all(&flow->del_hw_done);

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	clean_encap_dests(priv, flow, attr, &vf_tun);

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (esw_attr->int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->int_port);

	if (esw_attr->dest_int_port)
		mlx5e_tc_int_port_put(mlx5e_get_int_port_priv(priv), esw_attr->dest_int_port);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	free_flow_post_acts(flow);

	if (flow->attr->lag.count)
		mlx5_lag_del_mpesw_rule(esw->dev);

	kvfree(attr->esw_attr->rx_tun_attr);
	kvfree(attr->parse_attr);
	kfree(flow->attr);
}
struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr;

	attr = list_first_entry(&flow->attrs, struct mlx5_flow_attr, list);

	return attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}
static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}
static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	if (chain)
		return false;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		case FLOW_ACTION_SAMPLE:
			return true;
		default:
			continue;
		}
	}

	return false;
}

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})
static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->attr->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}
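/* tunnel_id layout, as composed above: the tunnel mapping id sits above
 * ENC_OPTS_BITS and the enc-opts mapping id occupies the low ENC_OPTS_BITS;
 * mlx5e_put_flow_tunnel_id() below unpacks the same layout to release both
 * mappings.
 */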
static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->attr->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->attr->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}
void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}
u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* If ip_version isn't matched on, fall back to deriving it from the
	 * ethertype match.
	 */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}
2248 /* Tunnel device follows RFC 6040, see include/net/inet_ecn.h.
2249 * And changes inner ip_ecn depending on inner and outer ip_ecn as follows:
2250 * +---------+----------------------------------------+
2251 * |Arriving | Arriving Outer Header |
2252 * | Inner +---------+---------+---------+----------+
2253 * | Header | Not-ECT | ECT(0) | ECT(1) | CE |
2254 * +---------+---------+---------+---------+----------+
2255 * | Not-ECT | Not-ECT | Not-ECT | Not-ECT | <drop> |
2256 * | ECT(0) | ECT(0) | ECT(0) | ECT(1) | CE* |
2257 * | ECT(1) | ECT(1) | ECT(1) | ECT(1)* | CE* |
2258 * | CE | CE | CE | CE | CE |
2259 * +---------+---------+---------+---------+----------+
2261 * Tc matches on inner after decapsulation on tunnel device, but hw offload matches
2262 * the inner ip_ecn value before hardware decap action.
2264 * Cells marked are changed from original inner packet ip_ecn value during decap, and
2265 * so matching those values on inner ip_ecn before decap will fail.
 * The following helper allows offload when inner ip_ecn won't be changed by outer ip_ecn,
 * except for outer ip_ecn = CE, where in all cases inner ip_ecn will be changed to CE as
 * well, and as such we can drop the inner ip_ecn = CE match.
 */
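/* Worked example (illustrative, not from the original source): with full
 * masks on both, enc_tos ecn = CE plus tos ecn = CE is offloadable because
 * decap rewrites inner ecn to CE regardless of its original value, so the
 * helper clears *match_inner_ecn and only the outer CE match is programmed.
 * enc_tos ecn = ECT(1), by contrast, is rejected: per the table above the
 * inner value the HW sees before decap may differ from what tc matched
 * after decap.
 */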
2272 static int mlx5e_tc_verify_tunnel_ecn(struct mlx5e_priv *priv,
2273 struct flow_cls_offload *f,
				       bool *match_inner_ecn)
{
2276 u8 outer_ecn_mask = 0, outer_ecn_key = 0, inner_ecn_mask = 0, inner_ecn_key = 0;
2277 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2278 struct netlink_ext_ack *extack = f->common.extack;
2279 struct flow_match_ip match;
2281 *match_inner_ecn = true;
2283 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
2284 flow_rule_match_enc_ip(rule, &match);
2285 outer_ecn_key = match.key->tos & INET_ECN_MASK;
		outer_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}
2289 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2290 flow_rule_match_ip(rule, &match);
2291 inner_ecn_key = match.key->tos & INET_ECN_MASK;
		inner_ecn_mask = match.mask->tos & INET_ECN_MASK;
	}
	if (outer_ecn_mask != 0 && outer_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack, "Partial match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev, "Partial match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!outer_ecn_mask) {
		if (!inner_ecn_mask)
			return 0;

		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Matching on tos ecn bits without also matching enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (inner_ecn_mask && inner_ecn_mask != INET_ECN_MASK) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		netdev_warn(priv->netdev,
			    "Partial match on tos ecn bits with match on enc_tos ecn bits isn't supported");
		return -EOPNOTSUPP;
	}

	if (!inner_ecn_mask)
		return 0;
2323 /* Both inner and outer have full mask on ecn */
	if (outer_ecn_key == INET_ECN_ECT_1) {
		/* inner ecn might change by DECAP action */
		NL_SET_ERR_MSG_MOD(extack, "Match on enc_tos ecn = ECT(1) isn't supported");
		netdev_warn(priv->netdev, "Match on enc_tos ecn = ECT(1) isn't supported");
		return -EOPNOTSUPP;
	}

	if (outer_ecn_key != INET_ECN_CE)
		return 0;

	if (inner_ecn_key != INET_ECN_CE) {
		/* Can't happen in software, as packet ecn will be changed to CE after decap */
		NL_SET_ERR_MSG_MOD(extack,
				   "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		netdev_warn(priv->netdev,
			    "Match on tos enc_tos ecn = CE while match on tos ecn != CE isn't supported");
		return -EOPNOTSUPP;
	}

	/* outer ecn = CE, inner ecn = CE: as decap will change inner ecn to CE in any case,
	 * drop match on inner ecn
	 */
	*match_inner_ecn = false;

	return 0;
}
2353 static int parse_tunnel_attr(struct mlx5e_priv *priv,
2354 struct mlx5e_tc_flow *flow,
2355 struct mlx5_flow_spec *spec,
2356 struct flow_cls_offload *f,
2357 struct net_device *filter_dev,
2361 struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
2362 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2363 struct netlink_ext_ack *extack = f->common.extack;
2364 bool needs_mapping, sets_mapping;
2367 if (!mlx5e_is_eswitch_flow(flow)) {
2368 NL_SET_ERR_MSG_MOD(extack, "Match on tunnel is not supported");
2372 needs_mapping = !!flow->attr->chain;
2373 sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f);
2374 *match_inner = !needs_mapping;
2376 if ((needs_mapping || sets_mapping) &&
2377 !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
2378 NL_SET_ERR_MSG_MOD(extack,
2379 "Chains on tunnel devices isn't supported without register loopback support");
2380 netdev_warn(priv->netdev,
2381 "Chains on tunnel devices isn't supported without register loopback support");
2385 if (!flow->attr->chain) {
2386 err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
2389 NL_SET_ERR_MSG_MOD(extack,
2390 "Failed to parse tunnel attributes");
2391 netdev_warn(priv->netdev,
2392 "Failed to parse tunnel attributes");
		/* With mpls over udp we decapsulate using packet reformat
		 * rather than a regular tunnel decap, so no DECAP action is
		 * set for bareudp devices.
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2401 err = mlx5e_tc_set_attr_rx_tun(flow, spec);
2404 } else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
2405 struct mlx5_flow_spec *tmp_spec;
		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
			return -ENOMEM;
		}
2413 memcpy(tmp_spec, spec, sizeof(*tmp_spec));
		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return err;
2431 return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
2434 static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
2436 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2440 static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
2442 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2446 static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
2448 return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2452 static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
2454 return MLX5_ADDR_OF(fte_match_param, spec->match_value,
2458 void *mlx5e_get_match_headers_value(u32 flags, struct mlx5_flow_spec *spec)
2460 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2461 get_match_inner_headers_value(spec) :
2462 get_match_outer_headers_value(spec);
2465 void *mlx5e_get_match_headers_criteria(u32 flags, struct mlx5_flow_spec *spec)
2467 return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
2468 get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}
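/* Usage sketch (illustrative): later stages pick the header set the HW will
 * actually inspect, e.g. offload_pedit_fields() below resolves
 *   headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
 * so that, for decap rules, pedit values are compared against the inner
 * headers rather than the soon-to-be-stripped outer ones.
 */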
2472 static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
2473 struct flow_cls_offload *f)
2475 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2476 struct netlink_ext_ack *extack = f->common.extack;
2477 struct net_device *ingress_dev;
2478 struct flow_match_meta match;
2480 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
2483 flow_rule_match_meta(rule, &match);
	if (!match.mask->ingress_ifindex)
		return 0;

	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}
2509 static bool skip_key_basic(struct net_device *filter_dev,
2510 struct flow_cls_offload *f)
2512 /* When doing mpls over udp decap, the user needs to provide
2513 * MPLS_UC as the protocol in order to be able to match on mpls
2514 * label fields. However, the actual ethertype is IP so we want to
2515 * avoid matching on this, otherwise we'll fail the match.
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}
2523 static int __parse_cls_flower(struct mlx5e_priv *priv,
2524 struct mlx5e_tc_flow *flow,
2525 struct mlx5_flow_spec *spec,
2526 struct flow_cls_offload *f,
2527 struct net_device *filter_dev,
2528 u8 *inner_match_level, u8 *outer_match_level)
2530 struct netlink_ext_ack *extack = f->common.extack;
2531 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2533 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2535 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2537 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2539 void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
2541 void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
2543 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2544 struct flow_dissector *dissector = rule->match.dissector;
2545 enum fs_flow_table_type fs_type;
2546 bool match_inner_ecn = true;
2552 fs_type = mlx5e_is_eswitch_flow(flow) ? FS_FT_FDB : FS_FT_NIC_RX;
2553 match_level = outer_match_level;
2555 if (dissector->used_keys &
2556 ~(BIT(FLOW_DISSECTOR_KEY_META) |
2557 BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2558 BIT(FLOW_DISSECTOR_KEY_BASIC) |
2559 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2560 BIT(FLOW_DISSECTOR_KEY_VLAN) |
2561 BIT(FLOW_DISSECTOR_KEY_CVLAN) |
2562 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2563 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2564 BIT(FLOW_DISSECTOR_KEY_PORTS) |
2565 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
2566 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
2567 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
2568 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
2569 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
2570 BIT(FLOW_DISSECTOR_KEY_TCP) |
2571 BIT(FLOW_DISSECTOR_KEY_IP) |
2572 BIT(FLOW_DISSECTOR_KEY_CT) |
2573 BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
2574 BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
2575 BIT(FLOW_DISSECTOR_KEY_ICMP) |
2576 BIT(FLOW_DISSECTOR_KEY_MPLS))) {
2577 NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
2578 netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
			   dissector->used_keys);
		return -EOPNOTSUPP;
	}
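	/* For instance (illustrative), a filter matching on ARP fields sets
	 * FLOW_DISSECTOR_KEY_ARP in used_keys, which falls outside the mask
	 * above and is therefore rejected with "Unsupported key".
	 */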
2583 if (mlx5e_get_tc_tun(filter_dev)) {
2584 bool match_inner = false;
2586 err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
2587 outer_match_level, &match_inner);
2592 /* header pointers should point to the inner headers
2593 * if the packet was decapsulated already.
2594 * outer headers are set by parse_tunnel_attr.
2596 match_level = inner_match_level;
2597 headers_c = get_match_inner_headers_criteria(spec);
2598 headers_v = get_match_inner_headers_value(spec);
2601 err = mlx5e_tc_verify_tunnel_ecn(priv, f, &match_inner_ecn);
2606 err = mlx5e_flower_parse_meta(filter_dev, f);
2610 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
2611 !skip_key_basic(filter_dev, f)) {
2612 struct flow_match_basic match;
2614 flow_rule_match_basic(rule, &match);
2615 mlx5e_tc_set_ethertype(priv->mdev, &match,
2616 match_level == outer_match_level,
2617 headers_c, headers_v);
2619 if (match.mask->n_proto)
2620 *match_level = MLX5_MATCH_L2;
2622 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
2623 is_vlan_dev(filter_dev)) {
2624 struct flow_dissector_key_vlan filter_dev_mask;
2625 struct flow_dissector_key_vlan filter_dev_key;
2626 struct flow_match_vlan match;
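		/* When classifying on a vlan upper device there is no vlan
		 * key in the filter itself, so the key/mask are synthesized
		 * from the device below, with priority left wildcarded.
		 */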
2628 if (is_vlan_dev(filter_dev)) {
2629 match.key = &filter_dev_key;
2630 match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
2631 match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
2632 match.key->vlan_priority = 0;
2633 match.mask = &filter_dev_mask;
2634 memset(match.mask, 0xff, sizeof(*match.mask));
2635 match.mask->vlan_priority = 0;
2637 flow_rule_match_vlan(rule, &match);
2639 if (match.mask->vlan_id ||
2640 match.mask->vlan_priority ||
2641 match.mask->vlan_tpid) {
2642 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2643 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2645 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2648 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2650 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2654 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
2655 match.mask->vlan_id);
2656 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
2657 match.key->vlan_id);
2659 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
2660 match.mask->vlan_priority);
2661 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
2662 match.key->vlan_priority);
2664 *match_level = MLX5_MATCH_L2;
2666 if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN) &&
2667 match.mask->vlan_eth_type &&
2668 MLX5_CAP_FLOWTABLE_TYPE(priv->mdev,
2669 ft_field_support.outer_second_vid,
2671 MLX5_SET(fte_match_set_misc, misc_c,
2672 outer_second_cvlan_tag, 1);
2673 spec->match_criteria_enable |=
2674 MLX5_MATCH_MISC_PARAMETERS;
2677 } else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged on both)
		 */
2682 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
2683 *match_level = MLX5_MATCH_L2;
2686 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
2687 struct flow_match_vlan match;
2689 flow_rule_match_cvlan(rule, &match);
2690 if (match.mask->vlan_id ||
2691 match.mask->vlan_priority ||
2692 match.mask->vlan_tpid) {
2693 if (!MLX5_CAP_FLOWTABLE_TYPE(priv->mdev, ft_field_support.outer_second_vid,
2695 NL_SET_ERR_MSG_MOD(extack,
2696 "Matching on CVLAN is not supported");
2700 if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
2701 MLX5_SET(fte_match_set_misc, misc_c,
2702 outer_second_svlan_tag, 1);
2703 MLX5_SET(fte_match_set_misc, misc_v,
2704 outer_second_svlan_tag, 1);
2706 MLX5_SET(fte_match_set_misc, misc_c,
2707 outer_second_cvlan_tag, 1);
2708 MLX5_SET(fte_match_set_misc, misc_v,
2709 outer_second_cvlan_tag, 1);
2712 MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
2713 match.mask->vlan_id);
2714 MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
2715 match.key->vlan_id);
2716 MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
2717 match.mask->vlan_priority);
2718 MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
2719 match.key->vlan_priority);
2721 *match_level = MLX5_MATCH_L2;
2722 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
2726 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2727 struct flow_match_eth_addrs match;
2729 flow_rule_match_eth_addrs(rule, &match);
2730 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2733 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2737 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2740 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2744 if (!is_zero_ether_addr(match.mask->src) ||
2745 !is_zero_ether_addr(match.mask->dst))
2746 *match_level = MLX5_MATCH_L2;
2749 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2750 struct flow_match_control match;
2752 flow_rule_match_control(rule, &match);
2753 addr_type = match.key->addr_type;
2755 /* the HW doesn't support frag first/later */
2756 if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
2757 NL_SET_ERR_MSG_MOD(extack, "Match on frag first/later is not supported");
2761 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2762 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2763 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2764 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2766 /* the HW doesn't need L3 inline to match on frag=no */
2767 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2768 *match_level = MLX5_MATCH_L2;
2769 /* *** L2 attributes parsing up to here *** */
2771 *match_level = MLX5_MATCH_L3;
2775 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2776 struct flow_match_basic match;
2778 flow_rule_match_basic(rule, &match);
2779 ip_proto = match.key->ip_proto;
2781 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2782 match.mask->ip_proto);
2783 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2784 match.key->ip_proto);
2786 if (match.mask->ip_proto)
2787 *match_level = MLX5_MATCH_L3;
2790 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2791 struct flow_match_ipv4_addrs match;
2793 flow_rule_match_ipv4_addrs(rule, &match);
2794 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2795 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2796 &match.mask->src, sizeof(match.mask->src));
2797 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2798 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2799 &match.key->src, sizeof(match.key->src));
2800 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2801 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2802 &match.mask->dst, sizeof(match.mask->dst));
2803 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2804 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2805 &match.key->dst, sizeof(match.key->dst));
2807 if (match.mask->src || match.mask->dst)
2808 *match_level = MLX5_MATCH_L3;
2811 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2812 struct flow_match_ipv6_addrs match;
2814 flow_rule_match_ipv6_addrs(rule, &match);
2815 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2816 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2817 &match.mask->src, sizeof(match.mask->src));
2818 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2819 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2820 &match.key->src, sizeof(match.key->src));
2822 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2823 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2824 &match.mask->dst, sizeof(match.mask->dst));
2825 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2826 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2827 &match.key->dst, sizeof(match.key->dst));
2829 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2830 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2831 *match_level = MLX5_MATCH_L3;
2834 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2835 struct flow_match_ip match;
2837 flow_rule_match_ip(rule, &match);
2838 if (match_inner_ecn) {
2839 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2840 match.mask->tos & 0x3);
2841 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2842 match.key->tos & 0x3);
2845 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2846 match.mask->tos >> 2);
2847 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2848 match.key->tos >> 2);
2850 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2852 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2855 if (match.mask->ttl &&
2856 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2857 ft_field_support.outer_ipv4_ttl)) {
2858 NL_SET_ERR_MSG_MOD(extack,
2859 "Matching on TTL is not supported");
2863 if (match.mask->tos || match.mask->ttl)
2864 *match_level = MLX5_MATCH_L3;
2867 /* *** L3 attributes parsing up to here *** */
2869 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2870 struct flow_match_ports match;
2872 flow_rule_match_ports(rule, &match);
2875 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2876 tcp_sport, ntohs(match.mask->src));
2877 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2878 tcp_sport, ntohs(match.key->src));
2880 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2881 tcp_dport, ntohs(match.mask->dst));
2882 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2883 tcp_dport, ntohs(match.key->dst));
2887 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2888 udp_sport, ntohs(match.mask->src));
2889 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2890 udp_sport, ntohs(match.key->src));
2892 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2893 udp_dport, ntohs(match.mask->dst));
2894 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2895 udp_dport, ntohs(match.key->dst));
2898 NL_SET_ERR_MSG_MOD(extack,
2899 "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}
2905 if (match.mask->src || match.mask->dst)
2906 *match_level = MLX5_MATCH_L4;
2909 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2910 struct flow_match_tcp match;
2912 flow_rule_match_tcp(rule, &match);
2913 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2914 ntohs(match.mask->flags));
2915 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2916 ntohs(match.key->flags));
2918 if (match.mask->flags)
2919 *match_level = MLX5_MATCH_L4;
2921 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2922 struct flow_match_icmp match;
2924 flow_rule_match_icmp(rule, &match);
2927 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2928 MLX5_FLEX_PROTO_ICMP)) {
2929 NL_SET_ERR_MSG_MOD(extack,
2930 "Match on Flex protocols for ICMP is not supported");
2933 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2935 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2937 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2939 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2942 case IPPROTO_ICMPV6:
2943 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2944 MLX5_FLEX_PROTO_ICMPV6)) {
2945 NL_SET_ERR_MSG_MOD(extack,
2946 "Match on Flex protocols for ICMPV6 is not supported");
2949 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2951 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2953 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2955 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2959 NL_SET_ERR_MSG_MOD(extack,
2960 "Code and type matching only with ICMP and ICMPv6");
2961 netdev_err(priv->netdev,
2962 "Code and type matching only with ICMP and ICMPv6\n");
2965 if (match.mask->code || match.mask->type) {
2966 *match_level = MLX5_MATCH_L4;
2967 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2970 /* Currently supported only for MPLS over UDP */
2971 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
2972 !netif_is_bareudp(filter_dev)) {
2973 NL_SET_ERR_MSG_MOD(extack,
2974 "Matching on MPLS is supported only for MPLS over UDP");
2975 netdev_err(priv->netdev,
2976 "Matching on MPLS is supported only for MPLS over UDP\n");
2983 static int parse_cls_flower(struct mlx5e_priv *priv,
2984 struct mlx5e_tc_flow *flow,
2985 struct mlx5_flow_spec *spec,
2986 struct flow_cls_offload *f,
2987 struct net_device *filter_dev)
2989 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2990 struct netlink_ext_ack *extack = f->common.extack;
2991 struct mlx5_core_dev *dev = priv->mdev;
2992 struct mlx5_eswitch *esw = dev->priv.eswitch;
2993 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2994 struct mlx5_eswitch_rep *rep;
2995 bool is_eswitch_flow;
2998 inner_match_level = MLX5_MATCH_NONE;
2999 outer_match_level = MLX5_MATCH_NONE;
3001 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
3002 &inner_match_level, &outer_match_level);
3003 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
3004 outer_match_level : inner_match_level;
3006 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
	if (!err && is_eswitch_flow) {
		rep = rpriv->rep;
		if (rep->vport != MLX5_VPORT_UPLINK &&
3010 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
3011 esw->offloads.inline_mode < non_tunnel_match_level)) {
3012 NL_SET_ERR_MSG_MOD(extack,
3013 "Flow is not offloaded due to min inline setting");
3014 netdev_warn(priv->netdev,
3015 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    non_tunnel_match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	flow->attr->inner_match_level = inner_match_level;
	flow->attr->outer_match_level = outer_match_level;

	return err;
}
3028 struct mlx5_fields {
3036 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
3037 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
3038 offsetof(struct pedit_headers, field) + (off), \
3039 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
/* masked values are the same and there are no rewrites that do not have a
 * match.
 */
3044 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
3045 type matchmaskx = *(type *)(matchmaskp); \
3046 type matchvalx = *(type *)(matchvalp); \
3047 type maskx = *(type *)(maskp); \
3048 type valx = *(type *)(valp); \
3050 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
3054 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
3055 void *matchmaskp, u8 bsize)
3061 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
3064 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
3067 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
3074 static struct mlx5_fields fields[] = {
3075 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
3076 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
3077 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
3078 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
3079 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
3080 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
3082 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
3083 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
3084 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
3085 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
3087 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
3088 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
3089 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
3090 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
3091 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
3092 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
3093 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
3094 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
3095 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
3096 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
3097 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
3098 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
3099 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
3100 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
3101 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
3102 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
3103 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
3104 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
3106 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
3107 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
	/* in the linux tcphdr, tcp_flags is 8 bits long */
3109 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
3111 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
3112 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
static unsigned long mask_to_le(unsigned long mask, int size)
{
	__be32 mask_be32;
	__be16 mask_be16;

	if (size == 32) {
		mask_be32 = (__force __be32)(mask);
		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
	} else if (size == 16) {
		mask_be32 = (__force __be32)(mask);
		mask_be16 = *(__be16 *)&mask_be32;
		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
	}

	return mask;
}
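/* Note (illustrative): the LE conversion above exists so that the
 * find_first_bit()/find_next_zero_bit()/find_last_bit() scans in
 * offload_pedit_fields() below walk the mask in the same bit order that
 * the HW set_action_in command uses for its offset/length fields.
 */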
3132 static int offload_pedit_fields(struct mlx5e_priv *priv,
3134 struct mlx5e_tc_flow_parse_attr *parse_attr,
3136 struct netlink_ext_ack *extack)
3138 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
3139 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3140 void *headers_c, *headers_v, *action, *vals_p;
3141 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
3142 struct mlx5e_tc_mod_hdr_acts *mod_acts;
3143 unsigned long mask, field_mask;
3144 int i, first, last, next_z;
3145 struct mlx5_fields *f;
3148 mod_acts = &parse_attr->mod_hdr_acts;
3149 headers_c = mlx5e_get_match_headers_criteria(*action_flags, &parse_attr->spec);
3150 headers_v = mlx5e_get_match_headers_value(*action_flags, &parse_attr->spec);
3152 set_masks = &hdrs[0].masks;
3153 add_masks = &hdrs[1].masks;
3154 set_vals = &hdrs[0].vals;
3155 add_vals = &hdrs[1].vals;
3157 for (i = 0; i < ARRAY_SIZE(fields); i++) {
3161 /* avoid seeing bits set from previous iterations */
3165 s_masks_p = (void *)set_masks + f->offset;
3166 a_masks_p = (void *)add_masks + f->offset;
3168 s_mask = *s_masks_p & f->field_mask;
3169 a_mask = *a_masks_p & f->field_mask;
3171 if (!s_mask && !a_mask) /* nothing to offload here */
3174 if (s_mask && a_mask) {
3175 NL_SET_ERR_MSG_MOD(extack,
3176 "can't set and add to the same HW field");
3177 netdev_warn(priv->netdev,
3178 "mlx5: can't set and add to the same HW field (%x)\n",
3185 void *match_mask = headers_c + f->match_offset;
3186 void *match_val = headers_v + f->match_offset;
3188 cmd = MLX5_ACTION_TYPE_SET;
3190 vals_p = (void *)set_vals + f->offset;
3191 /* don't rewrite if we have a match on the same value */
3192 if (cmp_val_mask(vals_p, s_masks_p, match_val,
3193 match_mask, f->field_bsize))
3195 /* clear to denote we consumed this field */
3196 *s_masks_p &= ~f->field_mask;
3198 cmd = MLX5_ACTION_TYPE_ADD;
3200 vals_p = (void *)add_vals + f->offset;
3201 /* add 0 is no change */
3202 if ((*(u32 *)vals_p & f->field_mask) == 0)
3204 /* clear to denote we consumed this field */
3205 *a_masks_p &= ~f->field_mask;
3210 mask = mask_to_le(mask, f->field_bsize);
3212 first = find_first_bit(&mask, f->field_bsize);
3213 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
3214 last = find_last_bit(&mask, f->field_bsize);
3215 if (first < next_z && next_z < last) {
3216 NL_SET_ERR_MSG_MOD(extack,
3217 "rewrite of few sub-fields isn't supported");
3218 netdev_warn(priv->netdev,
3219 "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
3224 action = mlx5e_mod_hdr_alloc(priv->mdev, namespace, mod_acts);
3225 if (IS_ERR(action)) {
3226 NL_SET_ERR_MSG_MOD(extack,
3227 "too many pedit actions, can't offload");
3228 mlx5_core_warn(priv->mdev,
3229 "mlx5: parsed %d pedit actions, can't do more\n",
3230 mod_acts->num_actions);
3231 return PTR_ERR(action);
3234 MLX5_SET(set_action_in, action, action_type, cmd);
3235 MLX5_SET(set_action_in, action, field, f->field);
3237 if (cmd == MLX5_ACTION_TYPE_SET) {
3240 field_mask = mask_to_le(f->field_mask, f->field_bsize);
3242 /* if field is bit sized it can start not from first bit */
3243 start = find_first_bit(&field_mask, f->field_bsize);
3245 MLX5_SET(set_action_in, action, offset, first - start);
3246 /* length is num of bits to be written, zero means length of 32 */
3247 MLX5_SET(set_action_in, action, length, (last - first + 1));
3250 if (f->field_bsize == 32)
3251 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
3252 else if (f->field_bsize == 16)
3253 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
3254 else if (f->field_bsize == 8)
3255 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
		++mod_acts->num_actions;
	}

	return 0;
}
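/* Worked example (illustrative): in offload_pedit_fields(), a pedit that
 * sets bits 4..11 of a 16-bit field (once converted to LE) yields
 * first = 4, next_z = 12, last = 11; next_z > last means one contiguous
 * run, so a single set_action_in with offset = 4 and length = 8 is
 * emitted. A mask with bits 0..3 and 8..11 set instead hits
 * first < next_z < last and is rejected as "rewrite of few sub-fields".
 */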
3263 static const struct pedit_headers zero_masks = {};
3265 static int verify_offload_pedit_fields(struct mlx5e_priv *priv,
3266 struct mlx5e_tc_flow_parse_attr *parse_attr,
3267 struct netlink_ext_ack *extack)
3269 struct pedit_headers *cmd_masks;
3272 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
3273 cmd_masks = &parse_attr->hdrs[cmd].masks;
3274 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
3275 NL_SET_ERR_MSG_MOD(extack, "attempt to offload an unsupported field");
3276 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
3277 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
3278 16, 1, cmd_masks, sizeof(zero_masks), true);
3286 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
3287 struct mlx5e_tc_flow_parse_attr *parse_attr,
3289 struct netlink_ext_ack *extack)
3293 err = offload_pedit_fields(priv, namespace, parse_attr, action_flags, extack);
3295 goto out_dealloc_parsed_actions;
3297 err = verify_offload_pedit_fields(priv, parse_attr, extack);
3299 goto out_dealloc_parsed_actions;
3303 out_dealloc_parsed_actions:
3304 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};
3321 is_action_keys_supported(const struct flow_action_entry *act, bool ct_flow,
3322 bool *modify_ip_header, bool *modify_tuple,
3323 struct netlink_ext_ack *extack)
3328 htype = act->mangle.htype;
3329 offset = act->mangle.offset;
3330 mask = ~act->mangle.mask;
	/* For the IPv4 & IPv6 headers, check the whole 4-byte word to
	 * determine whether the modified fields are limited to
	 * ttl & hop_limit or not.
	 */
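	/* e.g. (illustrative): a pedit writing iphdr->protocol lands in the
	 * same 32-bit word as ttl, so a non-zero ttl_word->protocol mask
	 * below flags the rewrite as a real IP header modification.
	 */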
3335 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
3336 struct ip_ttl_word *ttl_word =
3337 (struct ip_ttl_word *)&mask;
3339 if (offset != offsetof(struct iphdr, ttl) ||
3340 ttl_word->protocol ||
3342 *modify_ip_header = true;
3345 if (offset >= offsetof(struct iphdr, saddr))
3346 *modify_tuple = true;
3348 if (ct_flow && *modify_tuple) {
3349 NL_SET_ERR_MSG_MOD(extack,
3350 "can't offload re-write of ipv4 address with action ct");
3353 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
3354 struct ipv6_hoplimit_word *hoplimit_word =
3355 (struct ipv6_hoplimit_word *)&mask;
3357 if (offset != offsetof(struct ipv6hdr, payload_len) ||
3358 hoplimit_word->payload_len ||
3359 hoplimit_word->nexthdr) {
3360 *modify_ip_header = true;
3363 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
3364 *modify_tuple = true;
3366 if (ct_flow && *modify_tuple) {
3367 NL_SET_ERR_MSG_MOD(extack,
3368 "can't offload re-write of ipv6 address with action ct");
3371 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
3372 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
3373 *modify_tuple = true;
3375 NL_SET_ERR_MSG_MOD(extack,
3376 "can't offload re-write of transport header ports with action ct");
3384 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3385 bool ct_flow, struct netlink_ext_ack *extack,
3386 struct mlx5e_priv *priv,
3387 struct mlx5_flow_spec *spec)
	if (!modify_tuple || ct_clear)
		return true;

	if (ct_flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload tuple modification with non-clear ct()");
		netdev_info(priv->netdev,
			    "can't offload tuple modification with non-clear ct()");
		return false;
	}
3400 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3401 * (or after clear action), as otherwise, since the tuple is changed,
	 * we can't restore ct state
	 */
3404 if (mlx5_tc_ct_add_no_trk_match(spec)) {
3405 NL_SET_ERR_MSG_MOD(extack,
3406 "can't offload tuple modification with ct matches and no ct(clear) action");
		netdev_info(priv->netdev,
			    "can't offload tuple modification with ct matches and no ct(clear) action");
		return false;
	}

	return true;
}
3415 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3416 struct mlx5_flow_spec *spec,
3417 struct flow_action *flow_action,
3418 u32 actions, bool ct_flow,
3420 struct netlink_ext_ack *extack)
3422 const struct flow_action_entry *act;
3423 bool modify_ip_header, modify_tuple;
3430 headers_c = mlx5e_get_match_headers_criteria(actions, spec);
3431 headers_v = mlx5e_get_match_headers_value(actions, spec);
3432 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3434 /* for non-IP we only re-write MACs, so we're okay */
3435 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3436 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3439 modify_ip_header = false;
3440 modify_tuple = false;
3441 flow_action_for_each(i, act, flow_action) {
3442 if (act->id != FLOW_ACTION_MANGLE &&
3443 act->id != FLOW_ACTION_ADD)
3446 if (!is_action_keys_supported(act, ct_flow,
3448 &modify_tuple, extack))
3452 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3456 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3457 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3458 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3459 NL_SET_ERR_MSG_MOD(extack,
3460 "can't offload re-write of non TCP/UDP");
3461 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3471 actions_match_supported_fdb(struct mlx5e_priv *priv,
3472 struct mlx5e_tc_flow_parse_attr *parse_attr,
3473 struct mlx5e_tc_flow *flow,
3474 struct netlink_ext_ack *extack)
3476 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
3477 bool ct_flow, ct_clear;
3479 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3480 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3482 if (esw_attr->split_count && ct_flow &&
3483 !MLX5_CAP_GEN(esw_attr->in_mdev, reg_c_preserve)) {
		/* All registers used by ct are cleared when using
		 * split rules.
		 */
		NL_SET_ERR_MSG_MOD(extack, "Can't offload mirroring with action ct");
		return false;
	}
3491 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
3492 NL_SET_ERR_MSG_MOD(extack,
3493 "current firmware doesn't support split rule for port mirroring");
3494 netdev_warn_once(priv->netdev,
3495 "current firmware doesn't support split rule for port mirroring\n");
3503 actions_match_supported(struct mlx5e_priv *priv,
3504 struct flow_action *flow_action,
3506 struct mlx5e_tc_flow_parse_attr *parse_attr,
3507 struct mlx5e_tc_flow *flow,
3508 struct netlink_ext_ack *extack)
3510 bool ct_flow, ct_clear;
3512 ct_clear = flow->attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR;
3513 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
	if (!(actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule must have at least one forward/drop action");
		return false;
	}

	if (!(~actions &
	      (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
		NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action");
		return false;
	}

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    actions & MLX5_FLOW_CONTEXT_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported");
		return false;
	}
3545 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
3546 !modify_header_match_supported(priv, &parse_attr->spec, flow_action,
					   actions, ct_flow, ct_clear, extack))
		return false;
3550 if (mlx5e_is_eswitch_flow(flow) &&
	    !actions_match_supported_fdb(priv, parse_attr, flow, extack))
		return false;

	return true;
}
3557 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3559 return priv->mdev == peer_priv->mdev;
3562 bool mlx5e_same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3564 struct mlx5_core_dev *fmdev, *pmdev;
3565 u64 fsystem_guid, psystem_guid;
3568 pmdev = peer_priv->mdev;
3570 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3571 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3573 return (fsystem_guid == psystem_guid);
3577 actions_prepare_mod_hdr_actions(struct mlx5e_priv *priv,
3578 struct mlx5e_tc_flow *flow,
3579 struct mlx5_flow_attr *attr,
3580 struct netlink_ext_ack *extack)
3582 struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
3583 struct pedit_headers_action *hdrs = parse_attr->hdrs;
3584 enum mlx5_flow_namespace_type ns_type;
3587 if (!hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits &&
3588 !hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits)
3591 ns_type = mlx5e_get_flow_namespace(flow);
3593 err = alloc_tc_pedit_action(priv, ns_type, parse_attr, &attr->action, extack);
3597 if (parse_attr->mod_hdr_acts.num_actions > 0)
3600 /* In case all pedit actions are skipped, remove the MOD_HDR flag. */
3601 attr->action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3602 mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
3604 if (ns_type != MLX5_FLOW_NAMESPACE_FDB)
3607 if (!((attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3608 (attr->action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3609 attr->esw_attr->split_count = 0;
3614 static struct mlx5_flow_attr*
3615 mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr,
3616 enum mlx5_flow_namespace_type ns_type)
3618 struct mlx5e_tc_flow_parse_attr *parse_attr;
3619 u32 attr_sz = ns_to_attr_sz(ns_type);
3620 struct mlx5_flow_attr *attr2;
3622 attr2 = mlx5_alloc_flow_attr(ns_type);
3623 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
3624 if (!attr2 || !parse_attr) {
3630 memcpy(attr2, attr, attr_sz);
3631 INIT_LIST_HEAD(&attr2->list);
3632 parse_attr->filter_dev = attr->parse_attr->filter_dev;
3635 attr2->parse_attr = parse_attr;
3636 attr2->dest_chain = 0;
3637 attr2->dest_ft = NULL;
	if (ns_type == MLX5_FLOW_NAMESPACE_FDB) {
		attr2->esw_attr->out_count = 0;
		attr2->esw_attr->split_count = 0;
	}

	return attr2;
}
3647 static struct mlx5_core_dev *
3648 get_flow_counter_dev(struct mlx5e_tc_flow *flow)
3650 return mlx5e_is_eswitch_flow(flow) ? flow->attr->esw_attr->counter_dev : flow->priv->mdev;
3653 struct mlx5_flow_attr *
3654 mlx5e_tc_get_encap_attr(struct mlx5e_tc_flow *flow)
3656 struct mlx5_esw_flow_attr *esw_attr;
3657 struct mlx5_flow_attr *attr;
3660 list_for_each_entry(attr, &flow->attrs, list) {
3661 esw_attr = attr->esw_attr;
3662 for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
3663 if (esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)
3672 mlx5e_tc_unoffload_flow_post_acts(struct mlx5e_tc_flow *flow)
3674 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3675 struct mlx5_flow_attr *attr;
3677 list_for_each_entry(attr, &flow->attrs, list) {
3678 if (list_is_last(&attr->list, &flow->attrs))
3681 mlx5e_tc_post_act_unoffload(post_act, attr->post_act_handle);
3686 free_flow_post_acts(struct mlx5e_tc_flow *flow)
3688 struct mlx5_core_dev *counter_dev = get_flow_counter_dev(flow);
3689 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3690 struct mlx5_flow_attr *attr, *tmp;
3693 list_for_each_entry_safe(attr, tmp, &flow->attrs, list) {
3694 if (list_is_last(&attr->list, &flow->attrs))
3697 if (attr->post_act_handle)
3698 mlx5e_tc_post_act_del(post_act, attr->post_act_handle);
3700 clean_encap_dests(flow->priv, flow, attr, &vf_tun);
3702 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
3703 mlx5_fc_destroy(counter_dev, attr->counter);
3705 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
3706 mlx5e_mod_hdr_dealloc(&attr->parse_attr->mod_hdr_acts);
3707 if (attr->modify_hdr)
3708 mlx5_modify_header_dealloc(flow->priv->mdev, attr->modify_hdr);
3711 list_del(&attr->list);
3712 kvfree(attr->parse_attr);
3718 mlx5e_tc_offload_flow_post_acts(struct mlx5e_tc_flow *flow)
3720 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3721 struct mlx5_flow_attr *attr;
3724 list_for_each_entry(attr, &flow->attrs, list) {
3725 if (list_is_last(&attr->list, &flow->attrs))
3728 err = mlx5e_tc_post_act_offload(post_act, attr->post_act_handle);
3736 /* TC filter rule HW translation:
3738 * +---------------------+
3739 * + ft prio (tc chain) +
3740 * + original match +
3741 * +---------------------+
3743 * | if multi table action
3746 * +---------------------+
3747 * + post act ft |<----.
3748 * + match fte id | | split on multi table action
3749 * + do actions |-----'
3750 * +---------------------+
 * Do the rest of the actions after the last multi table action.
 */
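/* Example (illustrative, not from the original source): for a filter whose
 * action list is "ct commit" followed by "mirred redirect", ct is a multi
 * table action, so the attr list is split: the first rule executes ct and
 * sets a fte id, and a rule in the post act table matches that fte id and
 * performs the redirect.
 */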
static int
alloc_flow_post_acts(struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack)
{
3759 struct mlx5e_post_act *post_act = get_post_action(flow->priv);
3760 struct mlx5_flow_attr *attr, *next_attr = NULL;
3761 struct mlx5e_post_act_handle *handle;
3762 bool vf_tun, encap_valid = true;
3765 /* This is going in reverse order as needed.
3766 * The first entry is the last attribute.
3768 list_for_each_entry(attr, &flow->attrs, list) {
3770 /* Set counter action on last post act rule. */
3771 attr->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3773 err = mlx5e_tc_act_set_next_post_act(flow, attr, next_attr);
3778 /* Don't add post_act rule for first attr (last in the list).
3779 * It's being handled by the caller.
3781 if (list_is_last(&attr->list, &flow->attrs))
3784 err = set_encap_dests(flow->priv, flow, attr, extack, &encap_valid, &vf_tun);
3789 flow_flag_set(flow, SLOW);
3791 err = actions_prepare_mod_hdr_actions(flow->priv, flow, attr, extack);
3795 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
3796 err = mlx5e_tc_add_flow_mod_hdr(flow->priv, flow, attr);
3801 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
3802 err = alloc_flow_attr_counter(get_flow_counter_dev(flow), attr);
3807 handle = mlx5e_tc_post_act_add(post_act, attr);
3808 if (IS_ERR(handle)) {
3809 err = PTR_ERR(handle);
3813 attr->post_act_handle = handle;
3817 if (flow_flag_test(flow, SLOW))
3820 err = mlx5e_tc_offload_flow_post_acts(flow);
3828 free_flow_post_acts(flow);
3833 parse_tc_actions(struct mlx5e_tc_act_parse_state *parse_state,
3834 struct flow_action *flow_action)
3836 struct netlink_ext_ack *extack = parse_state->extack;
3837 struct mlx5e_tc_flow_action flow_action_reorder;
3838 struct mlx5e_tc_flow *flow = parse_state->flow;
3839 struct mlx5_flow_attr *attr = flow->attr;
3840 enum mlx5_flow_namespace_type ns_type;
3841 struct mlx5e_priv *priv = flow->priv;
3842 struct flow_action_entry *act, **_act;
3843 struct mlx5e_tc_act *tc_act;
3846 flow_action_reorder.num_entries = flow_action->num_entries;
	flow_action_reorder.entries = kcalloc(flow_action->num_entries,
					      sizeof(*flow_action_reorder.entries),
					      GFP_KERNEL);
3849 if (!flow_action_reorder.entries)
3852 mlx5e_tc_act_reorder_flow_actions(flow_action, &flow_action_reorder);
3854 ns_type = mlx5e_get_flow_namespace(flow);
3855 list_add(&attr->list, &flow->attrs);
3857 flow_action_for_each(i, _act, &flow_action_reorder) {
3859 tc_act = mlx5e_tc_act_get(act->id, ns_type);
3861 NL_SET_ERR_MSG_MOD(extack, "Not implemented offload action");
3866 if (!tc_act->can_offload(parse_state, act, i, attr)) {
3871 err = tc_act->parse_action(parse_state, act, priv, attr);
3875 parse_state->actions |= attr->action;
3877 /* Split attr for multi table act if not the last act. */
3878 if (tc_act->is_multi_table_act &&
3879 tc_act->is_multi_table_act(priv, act, attr) &&
3880 i < flow_action_reorder.num_entries - 1) {
3881 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
3885 attr = mlx5e_clone_flow_attr_for_post_act(flow->attr, ns_type);
3891 list_add(&attr->list, &flow->attrs);
3895 kfree(flow_action_reorder.entries);
3897 err = mlx5e_tc_act_post_parse(parse_state, flow_action, attr, ns_type);
3899 goto out_free_post_acts;
3901 err = alloc_flow_post_acts(flow, extack);
3903 goto out_free_post_acts;
3908 kfree(flow_action_reorder.entries);
3910 free_flow_post_acts(flow);
3916 flow_action_supported(struct flow_action *flow_action,
3917 struct netlink_ext_ack *extack)
3919 if (!flow_action_has_entries(flow_action)) {
3920 NL_SET_ERR_MSG_MOD(extack, "Flow action doesn't have any entries");
3924 if (!flow_action_hw_stats_check(flow_action, extack,
3925 FLOW_ACTION_HW_STATS_DELAYED_BIT)) {
3926 NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
3934 parse_tc_nic_actions(struct mlx5e_priv *priv,
3935 struct flow_action *flow_action,
3936 struct mlx5e_tc_flow *flow,
3937 struct netlink_ext_ack *extack)
3939 struct mlx5e_tc_act_parse_state *parse_state;
3940 struct mlx5e_tc_flow_parse_attr *parse_attr;
3941 struct mlx5_flow_attr *attr = flow->attr;
3944 err = flow_action_supported(flow_action, extack);
3948 attr->nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3949 parse_attr = attr->parse_attr;
3950 parse_state = &parse_attr->parse_state;
3951 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
3952 parse_state->ct_priv = get_ct_priv(priv);
3954 err = parse_tc_actions(parse_state, flow_action);
3958 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
3962 if (!actions_match_supported(priv, flow_action, parse_state->actions,
3963 parse_attr, flow, extack))
3969 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3970 struct net_device *peer_netdev)
3972 struct mlx5e_priv *peer_priv;
3974 peer_priv = netdev_priv(peer_netdev);
3976 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3977 mlx5e_eswitch_vf_rep(priv->netdev) &&
3978 mlx5e_eswitch_vf_rep(peer_netdev) &&
3979 mlx5e_same_hw_devs(priv, peer_priv));
3982 static bool same_hw_reps(struct mlx5e_priv *priv,
3983 struct net_device *peer_netdev)
3985 struct mlx5e_priv *peer_priv;
3987 peer_priv = netdev_priv(peer_netdev);
3989 return mlx5e_eswitch_rep(priv->netdev) &&
3990 mlx5e_eswitch_rep(peer_netdev) &&
3991 mlx5e_same_hw_devs(priv, peer_priv);
3994 static bool is_lag_dev(struct mlx5e_priv *priv,
3995 struct net_device *peer_netdev)
3997 return ((mlx5_lag_is_sriov(priv->mdev) ||
3998 mlx5_lag_is_multipath(priv->mdev)) &&
3999 same_hw_reps(priv, peer_netdev));
4002 static bool is_multiport_eligible(struct mlx5e_priv *priv, struct net_device *out_dev)
4004 if (same_hw_reps(priv, out_dev) &&
4005 MLX5_CAP_PORT_SELECTION(priv->mdev, port_select_flow_table) &&
4006 MLX5_CAP_GEN(priv->mdev, create_lag_when_not_master_up))
4012 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
4013 struct net_device *out_dev)
4015 if (is_merged_eswitch_vfs(priv, out_dev))
4018 if (is_multiport_eligible(priv, out_dev))
4021 if (is_lag_dev(priv, out_dev))
4024 return mlx5e_eswitch_rep(out_dev) &&
4025 same_port_devs(priv, netdev_priv(out_dev));
4028 int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv,
4029 struct mlx5_flow_attr *attr,
4031 enum mlx5e_tc_int_port_type type,
4035 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4036 struct mlx5e_tc_int_port_priv *int_port_priv;
4037 struct mlx5e_tc_flow_parse_attr *parse_attr;
4038 struct mlx5e_tc_int_port *dest_int_port;
4041 parse_attr = attr->parse_attr;
4042 int_port_priv = mlx5e_get_int_port_priv(priv);
4044 dest_int_port = mlx5e_tc_int_port_get(int_port_priv, ifindex, type);
4045 if (IS_ERR(dest_int_port))
4046 return PTR_ERR(dest_int_port);
4048 err = mlx5e_tc_match_to_reg_set(priv->mdev, &parse_attr->mod_hdr_acts,
4049 MLX5_FLOW_NAMESPACE_FDB, VPORT_TO_REG,
4050 mlx5e_tc_int_port_get_metadata(dest_int_port));
4052 mlx5e_tc_int_port_put(int_port_priv, dest_int_port);
4056 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
4058 esw_attr->dest_int_port = dest_int_port;
4059 esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE;
4061 /* Forward to root fdb for matching against the new source vport */
4062 attr->dest_chain = 0;
4068 parse_tc_fdb_actions(struct mlx5e_priv *priv,
4069 struct flow_action *flow_action,
4070 struct mlx5e_tc_flow *flow,
4071 struct netlink_ext_ack *extack)
4073 struct mlx5e_tc_act_parse_state *parse_state;
4074 struct mlx5e_tc_flow_parse_attr *parse_attr;
4075 struct mlx5_flow_attr *attr = flow->attr;
4076 struct mlx5_esw_flow_attr *esw_attr;
4077 struct net_device *filter_dev;
4080 err = flow_action_supported(flow_action, extack);
4084 esw_attr = attr->esw_attr;
4085 parse_attr = attr->parse_attr;
4086 filter_dev = parse_attr->filter_dev;
4087 parse_state = &parse_attr->parse_state;
4088 mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack);
4089 parse_state->ct_priv = get_ct_priv(priv);
4091 err = parse_tc_actions(parse_state, flow_action);
4095 /* Forward to/from internal port can only have 1 dest */
4096 if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) &&
4097 esw_attr->out_count > 1) {
4098 NL_SET_ERR_MSG_MOD(extack,
4099 "Rules with internal port can have only one destination");
4103 /* Forward from tunnel/internal port to internal port is not supported */
4104 if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) &&
4105 esw_attr->dest_int_port) {
4106 NL_SET_ERR_MSG_MOD(extack,
4107 "Forwarding from tunnel/internal port to internal port is not supported");
4111 err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack);
4115 if (!actions_match_supported(priv, flow_action, parse_state->actions,
4116 parse_attr, flow, extack))
4122 static void get_flags(int flags, unsigned long *flow_flags)
4124 unsigned long __flow_flags = 0;
4126 if (flags & MLX5_TC_FLAG(INGRESS))
4127 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4128 if (flags & MLX5_TC_FLAG(EGRESS))
4129 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4131 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4132 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4133 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4134 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4135 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4136 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4138 *flow_flags = __flow_flags;
4141 static const struct rhashtable_params tc_ht_params = {
4142 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4143 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4144 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
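/* Illustrative usage: with the table keyed by the immutable tc filter
 * cookie, a flow can be looked up as
 *   flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
 * which is what the flower add/del/stats entry points rely on.
 */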
4148 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4149 unsigned long flags)
4151 struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
4152 struct mlx5e_rep_priv *rpriv;
4154 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4155 rpriv = priv->ppriv;
4156 return &rpriv->tc_ht;
4157 } else /* NIC offload */
4161 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4163 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4164 struct mlx5_flow_attr *attr = flow->attr;
4165 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4166 flow_flag_test(flow, INGRESS);
4167 bool act_is_encap = !!(attr->action &
4168 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4169 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4170 MLX5_DEVCOM_ESW_OFFLOADS);
4175 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4176 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4177 (is_rep_ingress || act_is_encap))
4183 struct mlx5_flow_attr *
4184 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4186 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4187 sizeof(struct mlx5_esw_flow_attr) :
4188 sizeof(struct mlx5_nic_flow_attr);
4189 struct mlx5_flow_attr *attr;
	attr = kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
	if (!attr)
		return attr;

	INIT_LIST_HEAD(&attr->list);
	return attr;
}

static int
4200 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4201 struct flow_cls_offload *f, unsigned long flow_flags,
4202 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4203 struct mlx5e_tc_flow **__flow)
4205 struct mlx5e_tc_flow_parse_attr *parse_attr;
4206 struct mlx5_flow_attr *attr;
4207 struct mlx5e_tc_flow *flow;
4211 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4212 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4213 if (!parse_attr || !flow)
4216 flow->flags = flow_flags;
4217 flow->cookie = f->cookie;
4220 attr = mlx5_alloc_flow_attr(mlx5e_get_flow_namespace(flow));
4226 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4227 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4228 INIT_LIST_HEAD(&flow->hairpin);
4229 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4230 INIT_LIST_HEAD(&flow->attrs);
4231 refcount_set(&flow->refcnt, 1);
4232 init_completion(&flow->init_done);
4233 init_completion(&flow->del_hw_done);
4236 *__parse_attr = parse_attr;
4247 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4248 struct mlx5e_tc_flow_parse_attr *parse_attr,
4249 struct flow_cls_offload *f)
4251 attr->parse_attr = parse_attr;
4252 attr->chain = f->common.chain_index;
4253 attr->prio = f->common.prio;
4257 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4258 struct mlx5e_priv *priv,
4259 struct mlx5e_tc_flow_parse_attr *parse_attr,
4260 struct flow_cls_offload *f,
4261 struct mlx5_eswitch_rep *in_rep,
4262 struct mlx5_core_dev *in_mdev)
4264 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4265 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4267 mlx5e_flow_attr_init(attr, parse_attr, f);
4269 esw_attr->in_rep = in_rep;
4270 esw_attr->in_mdev = in_mdev;
4272 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4273 MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct flow_cls_offload *f,
		     unsigned long flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	/* actions validation depends on parsing the ct matches first */
	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	/* always set IP version for indirect table handling */
	flow->attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);

	err = parse_tc_fdb_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	if (flow->attr->lag.count) {
		err = mlx5_lag_add_mpesw_rule(esw->dev);
		if (err)
			goto err_free;
	}

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	complete_all(&flow->init_done);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_lag;

		add_unready_flow(flow);
	}

	return flow;

err_lag:
	if (flow->attr->lag.count)
		mlx5_lag_del_mpesw_rule(esw->dev);
err_free:
	mlx5e_flow_put(priv, flow);
out:
	return ERR_PTR(err);
}
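/* Note: under multipath, -ENETUNREACH from mlx5e_tc_add_fdb_flow() is not
 * treated as fatal; the flow is parked via add_unready_flow() and retried
 * from mlx5e_tc_reoffload_flows_work() once a route becomes reachable again.
 */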
static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
				      struct mlx5e_tc_flow *flow,
				      unsigned long flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev identifies the device the packet originated from: packets
	 * redirected to the uplink keep the mdev of the original flow, while
	 * packets redirected from the uplink use the peer mdev.
	 */
	if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow_flag_set(flow, DUP);
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct flow_cls_offload *f,
		   unsigned long flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;

	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	mlx5e_mod_hdr_dealloc(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct flow_cls_offload *f,
		  unsigned long flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* Offloaded flow rules are allowed to duplicate on non-uplink
	 * representors sharing a tc block with other slaves of a lag device.
	 * rpriv can be NULL if this function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv &&
	       rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
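/* Illustrative scenario: two representors enslaved to one bond device share
 * a tc block, so a filter added there is offered to both reps with the same
 * cookie. The duplicate insert on the second (non-uplink) rep is tolerated
 * rather than rejected with EEXIST, and orig_dev records the owning device.
 */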
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EBUSY;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor sharing tc block,
		 * just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5_esw_put(priv->mdev);
	mlx5_esw_release(priv->mdev);
	return err;
}
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have MLX5E_TC_FLOW_DELETED flag
	 * set.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);

	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec, then round to the nearest Mbit/sec
	 * (1 Mbit = 10^6 bits). Moreover, if rate is non-zero we choose to
	 * configure a minimum of 1 Mbit/sec.
	 */
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
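/* Worked example (illustrative): rate = 937500 bytes/sec
 *	937500 * 8 = 7500000 bits/sec
 *	7500000 + 500000 = 8000000 (adds the rounding bias)
 *	8000000 / 1000000 = 8 -> rate_mbps = 8
 * Any non-zero rate that rounds down to 0 (below ~62500 bytes/sec) is
 * clamped up to 1 Mbit/sec by the max_t() above.
 */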
int mlx5e_policer_validate(const struct flow_action *action,
			   const struct flow_action_entry *act,
			   struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack)) {
		NL_SET_ERR_MSG_MOD(extack, "Flow action HW stats type is not supported");
		return -EOPNOTSUPP;
	}

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			if (act->police.notexceed.act_id != FLOW_ACTION_CONTINUE) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Offload not supported when conform action is not continue");
				return -EOPNOTSUPP;
			}

			err = mlx5e_policer_validate(flow_action, act, extack);
			if (err)
				return err;

			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
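/* Example usage (illustrative): attaching a matchall policer to a rep,
 *
 *	tc filter add dev $REP ingress prio 1 protocol all matchall \
 *		action police rate 100mbit burst 16k conform-exceed drop/continue
 *
 * reaches apply_police_params() with rate_bytes_ps = 12500000 (100 Mbit/sec);
 * prio 1 is the only priority mlx5e_tc_configure_matchall() accepts.
 */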
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}
int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!mlx5e_same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&tc->hairpin_tbl_lock);
	hash_for_each(tc->hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&tc->hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			mlx5_core_hairpin_clear_dead_peer(hpe->hp->pair);

		mlx5e_hairpin_put(priv, hpe);
	}
}
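/* Note on the pattern above: entries are only collected (with a reference
 * taken) under hairpin_tbl_lock, while the blocking wait_for_completion()
 * runs after the lock is dropped, so dead-peer cleanup never sleeps while
 * holding the hash table lock.
 */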
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	priv = tc->priv;
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}
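/* Worked example (illustrative): with max_flow_counter = 32768 and
 * log_max_ft_size = 17:
 *	tc_grp_size = min(32768, BIT(18))     = 32768
 *	tc_tbl_size = min(32768 * 4, BIT(17)) = 131072
 * i.e. the table size is bounded by both the counter budget and the maximum
 * flow table size the device reports.
 */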
static int mlx5e_tc_nic_create_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_flow_table **ft = &tc->miss_t;
	struct mlx5_flow_table_attr ft_attr = {};
	struct mlx5_flow_namespace *ns;
	int err = 0;

	ft_attr.max_fte = 1;
	ft_attr.autogroup.max_num_groups = 1;
	ft_attr.level = MLX5E_TC_MISS_LEVEL;
	ft_attr.prio = 0;
	ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL);

	*ft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr);
	if (IS_ERR(*ft)) {
		err = PTR_ERR(*ft);
		netdev_err(priv->netdev, "failed to create tc nic miss table err=%d\n", err);
	}

	return err;
}
static void mlx5e_tc_nic_destroy_miss_table(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	mlx5_destroy_flow_table(tc->miss_t);
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	u64 mapping_id;
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);
	tc->priv = priv;

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);

	mapping_id = mlx5_query_nic_system_image_guid(dev);

	chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN,
					       sizeof(struct mlx5_mapped_obj),
					       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);

	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	err = mlx5e_tc_nic_create_miss_table(priv);
	if (err)
		goto err_chains;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = tc->miss_t;
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_miss;
	}

	tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL);
	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &tc->mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mlx5_chains_destroy(tc->chains);
err_miss:
	mlx5e_tc_nic_destroy_miss_table(priv);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}
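/* Note: the error labels above unwind in reverse order of initialization
 * (notifier, then ct/post_act/chains, miss table, mapping, rhashtable), the
 * usual goto-ladder idiom: each label cleans up everything created before
 * the step that failed.
 */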
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	mlx5e_flow_put(priv, flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = mlx5e_fs_get_tc(priv->fs);

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mlx5e_tc_post_act_destroy(tc->post_act);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
	mlx5e_tc_nic_destroy_miss_table(priv);
}
int mlx5e_tc_ht_init(struct rhashtable *tc_ht)
{
	int err;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);

	return 0;
}
void mlx5e_tc_ht_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	u64 mapping_id;
	int err = 0;

	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw),
						       MLX5_FLOW_NAMESPACE_FDB);
	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB,
					       uplink_priv->post_act);

	uplink_priv->int_port_priv = mlx5e_tc_int_port_init(netdev_priv(priv->netdev));

	uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act);

	mapping_id = mlx5_query_nic_system_image_guid(esw->dev);

	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL,
					sizeof(struct tunnel_match_key),
					TUNNEL_INFO_BITS_MASK, true);

	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* Two last values are reserved for stack devices slow path table mark
	 * and bridge ingress push mark.
	 */
	mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS,
					sz_enc_opts, ENC_OPTS_BITS_MASK - 2, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
	return err;
}
void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv)
{
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

	mlx5e_tc_sample_cleanup(uplink_priv->tc_psample);
	mlx5e_tc_int_port_cleanup(uplink_priv->int_port_priv);
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	mlx5e_flow_meters_cleanup(uplink_priv->flow_meters);
	mlx5e_tc_post_act_destroy(uplink_priv->post_act);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
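/* Note: this work item is the retry side of add_unready_flow(); a successful
 * mlx5e_tc_add_fdb_flow() here removes the flow from the unready list, while
 * failures simply leave it parked for the next invocation.
 */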
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
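/* Sketch of the reg_b layout consumed below (field widths follow from
 * MLX5E_TC_TABLE_CHAIN_TAG_MASK and ESW_ZONE_ID_MASK, not from a HW spec):
 *
 *	31 ...                                        ... 0
 *	+--------+------------------+-------------------+
 *	| unused | zone restore id  | chain mapping tag |
 *	+--------+------------------+-------------------+
 *
 * The chain tag is resolved through tc->mapping back to a tc chain, and the
 * zone restore id re-establishes the conntrack zone on the skb.
 */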
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5_mapped_obj mapped_obj;
	struct tc_skb_ext *tc_skb_ext;
	struct mlx5e_tc_table *tc;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);
	tc = mlx5e_fs_get_tc(priv->fs);
	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		chain = mapped_obj.chain;
		tc_skb_ext = tc_skb_ext_alloc(skb);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;
		zone_restore_id = (reg_b >> MLX5_REG_MAPPING_MOFFSET(NIC_ZONE_RESTORE_TO_REG)) &
				  ESW_ZONE_ID_MASK;

		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */