/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/flow_offload.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <linux/refcount.h>
#include <linux/completion.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/tc_act/tc_mpls.h>
#include <net/psample.h>
#include <net/ipv6_stubs.h>
#include <net/bareudp.h>
#include <net/bonding.h>
#include "en/rep/tc.h"
#include "en/rep/neigh.h"
#include "en/tc_tun.h"
#include "en/mapping.h"
#include "en/mod_hdr.h"
#include "en/tc_priv.h"
#include "en/tc_tun_encap.h"
#include "esw/sample.h"
#include "lib/devcom.h"
#include "lib/geneve.h"
#include "lib/fs_chains.h"
#include "diag/en_tc_tracepoint.h"
#include <asm/div64.h>

#define nic_chains(priv) ((priv)->fs.tc.chains)
#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(18)

struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = {
	[CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 0,
		.mlen = 2,
	},
	[VPORT_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_0,
		.moffset = 2,
		.mlen = 2,
	},
	[TUNNEL_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_C_1,
		.moffset = 1,
		.mlen = ((ESW_TUN_OPTS_BITS + ESW_TUN_ID_BITS) / 8),
		.soffset = MLX5_BYTE_OFF(fte_match_param,
					 misc_parameters_2.metadata_reg_c_1),
	},
	[ZONE_TO_REG] = zone_to_reg_ct,
	[ZONE_RESTORE_TO_REG] = zone_restore_to_reg_ct,
	[CTSTATE_TO_REG] = ctstate_to_reg_ct,
	[MARK_TO_REG] = mark_to_reg_ct,
	[LABELS_TO_REG] = labels_to_reg_ct,
	[FTEID_TO_REG] = fteid_to_reg_ct,
	/* For NIC rules we store the restore metadata directly
	 * into reg_b that is passed to SW since we don't
	 * jump between steering domains.
	 */
	[NIC_CHAIN_TO_REG] = {
		.mfield = MLX5_ACTION_IN_FIELD_METADATA_REG_B,
		.moffset = 0,
		.mlen = 2,
	},
	[NIC_ZONE_RESTORE_TO_REG] = nic_zone_restore_to_reg_ct,
};

/* To avoid a false lock dependency warning, set the tc_ht lock class to be
 * different from the lock class of the ht being used when deleting the last
 * flow from a group and then deleting the group: there we get into
 * del_sw_flow_group(), which calls rhashtable_destroy() on fg->ftes_hash;
 * that takes ht->mutex, but it is a different ht->mutex than the one here.
 */
static struct lock_class_key tc_ht_lock_key;
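
/* A sketch of the intended use (assumed; the init path is not shown in this
 * excerpt): the key would be applied right after the hash table is created,
 * e.g.
 *
 *	err = rhashtable_init(tc_ht, &tc_ht_params);
 *	if (err)
 *		return err;
 *	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);
 *
 * so lockdep tracks tc_ht->mutex separately from other rhashtable mutexes.
 */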

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow);

void
mlx5e_tc_match_to_reg_match(struct mlx5_flow_spec *spec,
			    enum mlx5e_tc_attr_to_reg type,
			    u32 data,
			    u32 mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	mask = (__force u32)(cpu_to_be32(mask)) >> (32 - (match_len * 8));
	data = (__force u32)(cpu_to_be32(data)) >> (32 - (match_len * 8));

	memcpy(fmask, &mask, match_len);
	memcpy(fval, &data, match_len);

	spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
}
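
/* Worked example (illustrative): for a 2-byte mapping (match_len = 2) and
 * data = 0x1234 on a little-endian CPU, cpu_to_be32(0x1234) reads back as
 * 0x34120000; shifting right by 32 - 16 = 16 bits leaves 0x3412, whose first
 * two bytes in memory are 0x12 0x34 - i.e. the value lands in the register
 * match field in network byte order.
 */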

void
mlx5e_tc_match_to_reg_get_match(struct mlx5_flow_spec *spec,
				enum mlx5e_tc_attr_to_reg type,
				u32 *data,
				u32 *mask)
{
	int soffset = mlx5e_tc_attr_to_reg_mappings[type].soffset;
	int match_len = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	void *headers_c = spec->match_criteria;
	void *headers_v = spec->match_value;
	void *fmask, *fval;

	fmask = headers_c + soffset;
	fval = headers_v + soffset;

	memcpy(mask, fmask, match_len);
	memcpy(data, fval, match_len);

	*mask = be32_to_cpu((__force __be32)(*mask << (32 - (match_len * 8))));
	*data = be32_to_cpu((__force __be32)(*data << (32 - (match_len * 8))));
}

int
mlx5e_tc_match_to_reg_set_and_get_id(struct mlx5_core_dev *mdev,
				     struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
				     enum mlx5_flow_namespace_type ns,
				     enum mlx5e_tc_attr_to_reg type,
				     u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;
	int err;

	err = alloc_mod_hdr_actions(mdev, ns, mod_hdr_acts);
	if (err)
		return err;

	modact = mod_hdr_acts->actions +
		 (mod_hdr_acts->num_actions * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
	err = mod_hdr_acts->num_actions;
	mod_hdr_acts->num_actions++;

	return err;
}
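
/* Note: on success the function returns the index of the SET action it just
 * wrote (num_actions before the increment), not 0.  Callers that need to
 * rewrite the same action later (e.g. once a tunnel id becomes known) can
 * feed that index back into mlx5e_tc_match_to_reg_mod_hdr_change().
 */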

static struct mlx5_tc_ct_priv *
get_ct_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->ct_priv;
	}

	return priv->fs.tc.ct;
}

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
static struct mlx5_esw_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		uplink_priv = &uplink_rpriv->uplink_priv;

		return uplink_priv->esw_psample;
	}

	return NULL;
}
#endif

struct mlx5_flow_handle *
mlx5_tc_rule_insert(struct mlx5e_priv *priv,
		    struct mlx5_flow_spec *spec,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev))
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	return mlx5e_add_offloaded_nic_rule(priv, spec, attr);
}

void
mlx5_tc_rule_delete(struct mlx5e_priv *priv,
		    struct mlx5_flow_handle *rule,
		    struct mlx5_flow_attr *attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	if (is_mdev_switchdev_mode(priv->mdev)) {
		mlx5_eswitch_del_offloaded_rule(esw, rule, attr);

		return;
	}

	mlx5e_del_offloaded_nic_rule(priv, rule, attr);
}

int
mlx5e_tc_match_to_reg_set(struct mlx5_core_dev *mdev,
			  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
			  enum mlx5_flow_namespace_type ns,
			  enum mlx5e_tc_attr_to_reg type,
			  u32 data)
{
	int ret = mlx5e_tc_match_to_reg_set_and_get_id(mdev, mod_hdr_acts, ns, type, data);

	return ret < 0 ? ret : 0;
}
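
/* Convenience wrapper: callers that don't care about the action index get a
 * plain success/errno code, since any non-negative id from
 * mlx5e_tc_match_to_reg_set_and_get_id() is collapsed to 0.
 */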

void mlx5e_tc_match_to_reg_mod_hdr_change(struct mlx5_core_dev *mdev,
					  struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts,
					  enum mlx5e_tc_attr_to_reg type,
					  int act_id, u32 data)
{
	int moffset = mlx5e_tc_attr_to_reg_mappings[type].moffset;
	int mfield = mlx5e_tc_attr_to_reg_mappings[type].mfield;
	int mlen = mlx5e_tc_attr_to_reg_mappings[type].mlen;
	char *modact;

	modact = mod_hdr_acts->actions + (act_id * MLX5_MH_ACT_SZ);

	/* Firmware has 5bit length field and 0 means 32bits */
	if (mlen == 4)
		mlen = 0;

	MLX5_SET(set_action_in, modact, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, modact, field, mfield);
	MLX5_SET(set_action_in, modact, offset, moffset * 8);
	MLX5_SET(set_action_in, modact, length, mlen * 8);
	MLX5_SET(set_action_in, modact, data, data);
}

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* protects flows list */
	spinlock_t flows_lock;
	/* flows sharing the same hairpin */
	struct list_head flows;
	/* hpes that were not fully initialized when the dead peer update
	 * event function traversed them.
	 */
	struct list_head dead_peer_wait_list;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
	refcount_t refcnt;
	struct completion res_ready;
};

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow);

struct mlx5e_tc_flow *mlx5e_flow_get(struct mlx5e_tc_flow *flow)
{
	if (!flow || !refcount_inc_not_zero(&flow->refcnt))
		return ERR_PTR(-EINVAL);

	return flow;
}

void mlx5e_flow_put(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	if (refcount_dec_and_test(&flow->refcnt)) {
		mlx5e_tc_del_flow(priv, flow);
		kfree_rcu(flow, rcu_head);
	}
}

bool mlx5e_is_eswitch_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, ESWITCH);
}

static bool mlx5e_is_ft_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, FT);
}

bool mlx5e_is_offloaded_flow(struct mlx5e_tc_flow *flow)
{
	return flow_flag_test(flow, OFFLOADED);
}

static int get_flow_name_space(struct mlx5e_tc_flow *flow)
{
	return mlx5e_is_eswitch_flow(flow) ?
		MLX5_FLOW_NAMESPACE_FDB : MLX5_FLOW_NAMESPACE_KERNEL;
}

static struct mod_hdr_tbl *
get_mod_hdr_table(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	return get_flow_name_space(flow) == MLX5_FLOW_NAMESPACE_FDB ?
		&esw->offloads.mod_hdr :
		&priv->fs.tc.mod_hdr;
}

static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_modify_hdr *modify_hdr;
	struct mlx5e_mod_hdr_handle *mh;

	mh = mlx5e_mod_hdr_attach(priv->mdev, get_mod_hdr_table(priv, flow),
				  get_flow_name_space(flow),
				  &parse_attr->mod_hdr_acts);
	if (IS_ERR(mh))
		return PTR_ERR(mh);

	modify_hdr = mlx5e_mod_hdr_get(mh);
	flow->attr->modify_hdr = modify_hdr;
	flow->mh = mh;

	return 0;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->mh)
		return;

	mlx5e_mod_hdr_detach(priv->mdev, get_mod_hdr_table(priv, flow),
			     flow->mh);
	flow->mh = NULL;
}

struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
	u32 *indirection_rqt, rqn;

	indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
	if (!indirection_rqt)
		return -ENOMEM;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}

	kfree(indirection_rqt);
	return 0;
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
	if (err)
		goto out;

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

out:
	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}

	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);

	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}
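
/* Illustrative example: peer_vhca_id = 5 and prio = 3 hash to
 * (5 << 16) | 3 = 0x00050003, so two entries only collide when both the
 * peer vhca id and the PCP prio are equal (prio is at most 8, well below
 * the 16-bit boundary).
 */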

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio) {
			refcount_inc(&hpe->refcnt);
			return hpe;
		}
	}

	return NULL;
}

static void mlx5e_hairpin_put(struct mlx5e_priv *priv,
			      struct mlx5e_hairpin_entry *hpe)
{
	/* no more hairpin flows for us, release the hairpin pair */
	if (!refcount_dec_and_mutex_lock(&hpe->refcnt, &priv->fs.tc.hairpin_tbl_lock))
		return;
	hash_del(&hpe->hairpin_hlist);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	if (!IS_ERR_OR_NULL(hpe->hp)) {
		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   dev_name(hpe->hp->pair->peer_mdev->device));

		mlx5e_hairpin_destroy(hpe->hp);
	}

	WARN_ON(!list_empty(&hpe->flows));
	kfree(hpe);
}

#define UNKNOWN_MATCH_PRIO 8
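
/* Assumed rationale: valid PCP values occupy 0..7, so 8 can never be
 * produced by a real priority match and safely denotes "no priority
 * matched" for mlx5e_hairpin_get_prio() below.
 */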

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		wait_for_completion(&hpe->res_ready);

		if (IS_ERR(hpe->hp)) {
			err = -EREMOTEIO;
			goto out_err;
		}
		goto attach_flow;
	}

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe) {
		mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);
		return -ENOMEM;
	}

	spin_lock_init(&hpe->flows_lock);
	INIT_LIST_HEAD(&hpe->flows);
	INIT_LIST_HEAD(&hpe->dead_peer_wait_list);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;
	refcount_set(&hpe->refcnt, 1);
	init_completion(&hpe->res_ready);

	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set a hairpin pair for each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
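
	/* Worked example (illustrative): on a 100Gbs port,
	 * mlx5e_port_max_linkspeed() reports link_speed = 100000 (Mbps), so
	 * 100000 / 50000 = 2 hairpin channels are used; the max_t() clamp
	 * above guarantees at least one channel on links slower than 50Gbs.
	 */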

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	hpe->hp = hp;
	complete_all(&hpe->res_ready);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto out_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0],
		   dev_name(hp->pair->peer_mdev->device),
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow_flag_set(flow, HAIRPIN_RSS);
		flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}

	flow->hpe = hpe;
	spin_lock(&hpe->flows_lock);
	list_add(&flow->hairpin, &hpe->flows);
	spin_unlock(&hpe->flows_lock);

	return 0;

out_err:
	mlx5e_hairpin_put(priv, hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	/* flow wasn't fully initialized */
	if (!flow->hpe)
		return;

	spin_lock(&flow->hpe->flows_lock);
	list_del(&flow->hairpin);
	spin_unlock(&flow->hpe->flows_lock);

	mlx5e_hairpin_put(priv, flow->hpe);
	flow->hpe = NULL;
}

struct mlx5_flow_handle *
mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct mlx5_flow_attr *attr)
{
	struct mlx5_flow_context *flow_context = &spec->flow_context;
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);
	struct mlx5_nic_flow_attr *nic_attr = attr->nic_attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flags = FLOW_ACT_NO_APPEND,
	};
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_table *ft;
	int dest_ix = 0;

	flow_context->flags |= FLOW_CONTEXT_HAS_TAG;
	flow_context->flow_tag = nic_attr->flow_tag;

	if (attr->dest_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = attr->dest_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_ft) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = nic_attr->hairpin_ft;
		dest_ix++;
	} else if (nic_attr->hairpin_tirn) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		dest[dest_ix].tir_num = nic_attr->hairpin_tirn;
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		if (attr->dest_chain) {
			dest[dest_ix].ft = mlx5_chains_get_table(nic_chains,
								 attr->dest_chain, 1,
								 MLX5E_TC_FT_LEVEL);
			if (IS_ERR(dest[dest_ix].ft))
				return ERR_CAST(dest[dest_ix].ft);
		} else {
			dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
		}
		dest_ix++;
	}

	if (dest[0].type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE &&
	    MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		flow_act.flags |= FLOW_ACT_IGNORE_FLOW_LEVEL;

	if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(attr->counter);
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		flow_act.modify_hdr = attr->modify_hdr;

	mutex_lock(&tc->t_lock);
	if (IS_ERR_OR_NULL(tc->t)) {
		/* Create the root table here if it doesn't exist yet */
		tc->t =
			mlx5_chains_get_table(nic_chains, 0, 1, MLX5E_TC_FT_LEVEL);

		if (IS_ERR(tc->t)) {
			mutex_unlock(&tc->t_lock);
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_ft_get;
		}
	}
	mutex_unlock(&tc->t_lock);

	if (attr->chain || attr->prio)
		ft = mlx5_chains_get_table(nic_chains,
					   attr->chain, attr->prio,
					   MLX5E_TC_FT_LEVEL);
	else
		ft = attr->ft;

	if (IS_ERR(ft)) {
		rule = ERR_CAST(ft);
		goto err_ft_get;
	}

	if (attr->outer_match_level != MLX5_MATCH_NONE)
		spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(ft, spec,
				   &flow_act, dest, dest_ix);
	if (IS_ERR(rule))
		goto err_rule;

	return rule;

err_rule:
	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains,
				      attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);
err_ft_get:
	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains,
				      attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);

	return ERR_CAST(rule);
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_fc *counter = NULL;
	int err;

	if (flow_flag_test(flow, HAIRPIN)) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			return err;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter))
			return PTR_ERR(counter);

		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
		if (err)
			return err;
	}

	if (flow_flag_test(flow, CT))
		flow->rule[0] = mlx5_tc_ct_flow_offload(get_ct_priv(priv), flow, &parse_attr->spec,
							attr, &parse_attr->mod_hdr_acts);
	else
		flow->rule[0] = mlx5e_add_offloaded_nic_rule(priv, &parse_attr->spec,
							     attr);

	return PTR_ERR_OR_ZERO(flow->rule[0]);
}

void mlx5e_del_offloaded_nic_rule(struct mlx5e_priv *priv,
				  struct mlx5_flow_handle *rule,
				  struct mlx5_flow_attr *attr)
{
	struct mlx5_fs_chains *nic_chains = nic_chains(priv);

	mlx5_del_flow_rules(rule);

	if (attr->chain || attr->prio)
		mlx5_chains_put_table(nic_chains, attr->chain, attr->prio,
				      MLX5E_TC_FT_LEVEL);

	if (attr->dest_chain)
		mlx5_chains_put_table(nic_chains, attr->dest_chain, 1,
				      MLX5E_TC_FT_LEVEL);
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	flow_flag_clear(flow, OFFLOADED);

	if (flow_flag_test(flow, CT))
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
	else if (!IS_ERR_OR_NULL(flow->rule[0]))
		mlx5e_del_offloaded_nic_rule(priv, flow->rule[0], attr);

	/* Remove root table if no rules are left to avoid
	 * extra steering hops.
	 */
	mutex_lock(&priv->fs.tc.t_lock);
	if (!mlx5e_tc_num_filters(priv, MLX5_TC_FLAG(NIC_OFFLOAD)) &&
	    !IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(nic_chains(priv), 0, 1, MLX5E_TC_FT_LEVEL);
		priv->fs.tc.t = NULL;
	}
	mutex_unlock(&priv->fs.tc.t_lock);

	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	mlx5_fc_destroy(priv->mdev, attr->counter);

	if (flow_flag_test(flow, HAIRPIN))
		mlx5e_hairpin_flow_del(priv, flow);

	kfree(flow->attr);
}

struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_flow_attr *attr)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct mlx5_flow_handle *rule;

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		return mlx5_eswitch_add_offloaded_rule(esw, spec, attr);

	if (flow_flag_test(flow, CT)) {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;

		rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
					       flow, spec, attr,
					       mod_hdr_acts);
#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	} else if (flow_flag_test(flow, SAMPLE)) {
		rule = mlx5_esw_sample_offload(get_sample_priv(flow->priv), spec, attr);
#endif
	} else {
		rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	}

	if (IS_ERR(rule))
		return rule;

	if (attr->esw_attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			if (flow_flag_test(flow, CT))
				mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
			else
				mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	return rule;
}

void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_flow_attr *attr)
{
	flow_flag_clear(flow, OFFLOADED);

	if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH)
		goto offload_rule_0;

	if (flow_flag_test(flow, CT)) {
		mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
		return;
	}

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	if (flow_flag_test(flow, SAMPLE)) {
		mlx5_esw_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
		return;
	}
#endif

	if (attr->esw_attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

offload_rule_0:
	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec)
{
	struct mlx5_flow_attr *slow_attr;
	struct mlx5_flow_handle *rule;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr)
		return ERR_PTR(-ENOMEM);

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow_flag_set(flow, SLOW);

	kfree(slow_attr);

	return rule;
}

void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				       struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_attr *slow_attr;

	slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB);
	if (!slow_attr) {
		mlx5_core_warn(flow->priv->mdev, "Unable to alloc attr to unoffload slow path rule\n");
		return;
	}

	memcpy(slow_attr, flow->attr, ESW_FLOW_ATTR_SZ);
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->esw_attr->split_count = 0;
	slow_attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow_flag_clear(flow, SLOW);
	kfree(slow_attr);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_add(struct mlx5e_tc_flow *flow,
			     struct list_head *unready_flows)
{
	flow_flag_set(flow, NOT_READY);
	list_add_tail(&flow->unready, unready_flows);
}

/* Caller must obtain uplink_priv->unready_flows_lock mutex before calling this
 * function.
 */
static void unready_flow_del(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow_flag_clear(flow, NOT_READY);
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_add(flow, &uplink_priv->unready_flows);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	mutex_lock(&uplink_priv->unready_flows_lock);
	unready_flow_del(flow);
	mutex_unlock(&uplink_priv->unready_flows_lock);
}

static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv);

bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_dev)
{
	struct mlx5_core_dev *out_mdev, *route_mdev;
	struct mlx5e_priv *out_priv, *route_priv;

	out_priv = netdev_priv(out_dev);
	out_mdev = out_priv->mdev;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	if (out_mdev->coredev_type != MLX5_COREDEV_PF ||
	    route_mdev->coredev_type != MLX5_COREDEV_VF)
		return false;

	return same_hw_devs(out_priv, route_priv);
}

int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *route_dev, u16 *vport)
{
	struct mlx5e_priv *out_priv, *route_priv;
	struct mlx5_core_dev *route_mdev;
	struct mlx5_eswitch *esw;
	u16 vhca_id;
	int err;

	out_priv = netdev_priv(out_dev);
	esw = out_priv->mdev->priv.eswitch;
	route_priv = netdev_priv(route_dev);
	route_mdev = route_priv->mdev;

	vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id);
	err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport);

	return err;
}

int mlx5e_tc_add_flow_mod_hdr(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow_parse_attr *parse_attr,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts = &parse_attr->mod_hdr_acts;
	struct mlx5_modify_hdr *mod_hdr;

	mod_hdr = mlx5_modify_header_alloc(priv->mdev,
					   get_flow_name_space(flow),
					   mod_hdr_acts->num_actions,
					   mod_hdr_acts->actions);
	if (IS_ERR(mod_hdr))
		return PTR_ERR(mod_hdr);

	WARN_ON(flow->attr->modify_hdr);
	flow->attr->modify_hdr = mod_hdr;

	return 0;
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5_flow_attr *attr = flow->attr;
	bool vf_tun = false, encap_valid = true;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	u32 max_prio, max_chain;
	int err = 0;
	int out_index;

	/* We check chain range only for tc flows.
	 * For ft flows, we checked attr->chain was originally 0 and set it to
	 * FDB_FT_CHAIN which is outside tc range.
	 * See mlx5e_rep_setup_ft_cb().
	 */
	max_chain = mlx5_chains_get_chain_range(esw_chains(esw));
	if (!mlx5e_is_ft_flow(flow) && attr->chain > max_chain) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	max_prio = mlx5_chains_get_prio_range(esw_chains(esw));
	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_out;
	}

	if (flow_flag_test(flow, TUN_RX)) {
		err = mlx5e_attach_decap_route(priv, flow);
		if (err)
			goto err_out;
	}

	if (flow_flag_test(flow, L3_TO_L2_DECAP)) {
		err = mlx5e_attach_decap(priv, flow, extack);
		if (err)
			goto err_out;
	}

	parse_attr = attr->parse_attr;
	esw_attr = attr->esw_attr;

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			goto err_out;

		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			vf_tun = true;
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		esw_attr->dests[out_index].rep = rpriv->rep;
		esw_attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_out;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR &&
	    !(attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR)) {
		if (vf_tun) {
			err = mlx5e_tc_add_flow_mod_hdr(priv, parse_attr, flow);
			if (err)
				goto err_out;
		} else {
			err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
			if (err)
				goto err_out;
		}
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(esw_attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_out;
		}

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid)
		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec);
	else
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_out;
	}
	flow_flag_set(flow, OFFLOADED);

	return 0;

err_out:
	flow_flag_set(flow, FAILED);
	return err;
}

static bool mlx5_flow_has_geneve_opt(struct mlx5e_tc_flow *flow)
{
	struct mlx5_flow_spec *spec = &flow->attr->parse_attr->spec;
	void *headers_v = MLX5_ADDR_OF(fte_match_param,
				       spec->match_value,
				       misc_parameters_3);
	u32 geneve_tlv_opt_0_data = MLX5_GET(fte_match_set_misc3,
					     headers_v,
					     geneve_tlv_option_0_data);

	return !!geneve_tlv_opt_0_data;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5_esw_flow_attr *esw_attr;
	bool vf_tun = false;
	int out_index;

	esw_attr = attr->esw_attr;
	mlx5e_put_flow_tunnel_id(flow);

	if (flow_flag_test(flow, NOT_READY))
		remove_unready_flow(flow);

	if (mlx5e_is_offloaded_flow(flow)) {
		if (flow_flag_test(flow, SLOW))
			mlx5e_tc_unoffload_from_slow_path(esw, flow);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	if (mlx5_flow_has_geneve_opt(flow))
		mlx5_geneve_tlv_option_del(priv->mdev->geneve);

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (flow->decap_route)
		mlx5e_detach_decap_route(priv, flow);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		if (esw_attr->dests[out_index].flags &
		    MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE)
			vf_tun = true;
		if (esw_attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) {
			mlx5e_detach_encap(priv, flow, out_index);
			kfree(attr->parse_attr->tun_info[out_index]);
		}
	}

	mlx5_tc_ct_match_del(get_ct_priv(priv), &flow->attr->ct_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		dealloc_mod_hdr_actions(&attr->parse_attr->mod_hdr_acts);
		if (vf_tun && attr->modify_hdr)
			mlx5_modify_header_dealloc(priv->mdev, attr->modify_hdr);
		else
			mlx5e_detach_mod_hdr(priv, flow);
	}
	kvfree(attr->parse_attr);
	kvfree(attr->esw_attr->rx_tun_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(esw_attr->counter_dev, attr->counter);

	if (flow_flag_test(flow, L3_TO_L2_DECAP))
		mlx5e_detach_decap(priv, flow);

	kfree(flow->attr->esw_attr->sample);
	kfree(flow->attr);
}

struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	return flow->attr->counter;
}

/* Iterate over tmp_list of flows attached to flow_list head. */
void mlx5e_put_flow_list(struct mlx5e_priv *priv, struct list_head *flow_list)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, flow_list, tmp_list)
		mlx5e_flow_put(priv, flow);
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!flow_flag_test(flow, ESWITCH) ||
	    !flow_flag_test(flow, DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow_flag_clear(flow, DUP);

	if (refcount_dec_and_test(&flow->peer_flow->refcnt)) {
		mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
		kfree(flow->peer_flow);
	}

	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (mlx5e_is_eswitch_flow(flow)) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

static int flow_has_tc_fwd_action(struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_action *flow_action = &rule->action;
	const struct flow_action_entry *act;
	int i;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_GOTO:
			return true;
		default:
			continue;
		}
	}

	return false;
}

static int
enc_opts_is_dont_care_or_full_match(struct mlx5e_priv *priv,
				    struct flow_dissector_key_enc_opts *opts,
				    struct netlink_ext_ack *extack,
				    bool *dont_care)
{
	struct geneve_opt *opt;
	int off = 0;

	while (opts->len > off) {
		opt = (struct geneve_opt *)&opts->data[off];

		if (!(*dont_care) || opt->opt_class || opt->type ||
		    memchr_inv(opt->opt_data, 0, opt->length * 4)) {
			*dont_care = false;

			if (opt->opt_class != htons(U16_MAX) ||
			    opt->type != U8_MAX) {
				NL_SET_ERR_MSG(extack,
					       "Partial match of tunnel options in chain > 0 isn't supported");
				netdev_warn(priv->netdev,
					    "Partial match of tunnel options in chain > 0 isn't supported");
				return -EOPNOTSUPP;
			}
		}

		off += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
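
/* Illustrative walk of the loop above: the geneve option length field counts
 * 4-byte multiples, so an option with opt->length = 1 carries 4 bytes of
 * opt_data and the cursor advances by sizeof(struct geneve_opt) + 4 = 8
 * bytes to the next TLV.
 */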

#define COPY_DISSECTOR(rule, diss_key, dst)\
({ \
	struct flow_rule *__rule = (rule);\
	typeof(dst) __dst = dst;\
\
	memcpy(__dst,\
	       skb_flow_dissector_target(__rule->match.dissector,\
					 diss_key,\
					 __rule->match.key),\
	       sizeof(*__dst));\
})

static int mlx5e_get_flow_tunnel_id(struct mlx5e_priv *priv,
				    struct mlx5e_tc_flow *flow,
				    struct flow_cls_offload *f,
				    struct net_device *filter_dev)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts;
	struct flow_match_enc_opts enc_opts_match;
	struct tunnel_match_enc_opts tun_enc_opts;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5_flow_attr *attr = flow->attr;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct tunnel_match_key tunnel_key;
	bool enc_opts_is_dont_care = true;
	u32 tun_id, enc_opts_id = 0;
	struct mlx5_eswitch *esw;
	u32 value, mask;
	int err;

	esw = priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	memset(&tunnel_key, 0, sizeof(tunnel_key));
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL,
		       &tunnel_key.enc_control);
	if (tunnel_key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS)
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
			       &tunnel_key.enc_ipv4);
	else
		COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
			       &tunnel_key.enc_ipv6);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_IP, &tunnel_key.enc_ip);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_PORTS,
		       &tunnel_key.enc_tp);
	COPY_DISSECTOR(rule, FLOW_DISSECTOR_KEY_ENC_KEYID,
		       &tunnel_key.enc_key_id);
	tunnel_key.filter_ifindex = filter_dev->ifindex;

	err = mapping_add(uplink_priv->tunnel_mapping, &tunnel_key, &tun_id);
	if (err)
		return err;

	flow_rule_match_enc_opts(rule, &enc_opts_match);
	err = enc_opts_is_dont_care_or_full_match(priv,
						  enc_opts_match.mask,
						  extack,
						  &enc_opts_is_dont_care);
	if (err)
		goto err_enc_opts;

	if (!enc_opts_is_dont_care) {
		memset(&tun_enc_opts, 0, sizeof(tun_enc_opts));
		memcpy(&tun_enc_opts.key, enc_opts_match.key,
		       sizeof(*enc_opts_match.key));
		memcpy(&tun_enc_opts.mask, enc_opts_match.mask,
		       sizeof(*enc_opts_match.mask));

		err = mapping_add(uplink_priv->tunnel_enc_opts_mapping,
				  &tun_enc_opts, &enc_opts_id);
		if (err)
			goto err_enc_opts;
	}

	value = tun_id << ENC_OPTS_BITS | enc_opts_id;
	mask = enc_opts_id ? TUNNEL_ID_MASK :
			     (TUNNEL_ID_MASK & ~ENC_OPTS_BITS_MASK);
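
	/* Illustrative packing (assuming the ENC_OPTS_BITS low bits hold the
	 * enc-opts mapping id, e.g. ENC_OPTS_BITS = 12 for illustration):
	 * tun_id = 2 and enc_opts_id = 1 would give
	 * value = (2 << 12) | 1 = 0x2001.  When no enc opts were mapped, the
	 * enc-opts bits are masked out so any enc_opts_id matches.
	 */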

	if (attr->chain) {
		mlx5e_tc_match_to_reg_match(&attr->parse_attr->spec,
					    TUNNEL_TO_REG, value, mask);
	} else {
		mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
		err = mlx5e_tc_match_to_reg_set(priv->mdev,
						mod_hdr_acts, MLX5_FLOW_NAMESPACE_FDB,
						TUNNEL_TO_REG, value);
		if (err)
			goto err_set;

		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
	}

	flow->tunnel_id = value;
	return 0;

err_set:
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
err_enc_opts:
	mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	return err;
}

static void mlx5e_put_flow_tunnel_id(struct mlx5e_tc_flow *flow)
{
	u32 enc_opts_id = flow->tunnel_id & ENC_OPTS_BITS_MASK;
	u32 tun_id = flow->tunnel_id >> ENC_OPTS_BITS;
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &uplink_rpriv->uplink_priv;

	if (tun_id)
		mapping_remove(uplink_priv->tunnel_mapping, tun_id);
	if (enc_opts_id)
		mapping_remove(uplink_priv->tunnel_enc_opts_mapping,
			       enc_opts_id);
}

u32 mlx5e_tc_get_flow_tun_id(struct mlx5e_tc_flow *flow)
{
	return flow->tunnel_id;
}

void mlx5e_tc_set_ethertype(struct mlx5_core_dev *mdev,
			    struct flow_match_basic *match, bool outer,
			    void *headers_c, void *headers_v)
{
	bool ip_version_cap;

	ip_version_cap = outer ?
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.outer_ip_version) :
		MLX5_CAP_FLOWTABLE_NIC_RX(mdev,
					  ft_field_support.inner_ip_version);

	if (ip_version_cap && match->mask->n_proto == htons(0xFFFF) &&
	    (match->key->n_proto == htons(ETH_P_IP) ||
	     match->key->n_proto == htons(ETH_P_IPV6))) {
		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_version);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version,
			 match->key->n_proto == htons(ETH_P_IP) ? 4 : 6);
	} else {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match->mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match->key->n_proto));
	}
}

u8 mlx5e_tc_get_ip_version(struct mlx5_flow_spec *spec, bool outer)
{
	void *headers_v;
	u16 ethertype;
	u8 ip_version;

	if (outer)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);

	ip_version = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_version);
	/* Return ip_version converted from ethertype anyway */
	if (!ip_version) {
		ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
		if (ethertype == ETH_P_IP || ethertype == ETH_P_ARP)
			ip_version = 4;
		else if (ethertype == ETH_P_IPV6)
			ip_version = 6;
	}
	return ip_version;
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_flow_spec *spec,
			     struct flow_cls_offload *f,
			     struct net_device *filter_dev,
			     u8 *match_level,
			     bool *match_inner)
{
	struct mlx5e_tc_tunnel *tunnel = mlx5e_get_tc_tun(filter_dev);
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = f->common.extack;
	bool needs_mapping, sets_mapping;
	int err;

	if (!mlx5e_is_eswitch_flow(flow))
		return -EOPNOTSUPP;

	needs_mapping = !!flow->attr->chain;
	sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f);
	*match_inner = !needs_mapping;

	if ((needs_mapping || sets_mapping) &&
	    !mlx5_eswitch_reg_c1_loopback_enabled(esw)) {
		NL_SET_ERR_MSG(extack,
			       "Chains on tunnel devices isn't supported without register loopback support");
		netdev_warn(priv->netdev,
			    "Chains on tunnel devices isn't supported without register loopback support");
		return -EOPNOTSUPP;
	}

	if (!flow->attr->chain) {
		err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
					 match_level);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev,
				    "Failed to parse tunnel attributes");
			return err;
		}

		/* With mpls over udp we decapsulate using packet reformat
		 * object
		 */
		if (!netif_is_bareudp(filter_dev))
			flow->attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
		err = mlx5e_tc_set_attr_rx_tun(flow, spec);
		if (err)
			return err;
	} else if (tunnel && tunnel->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) {
		struct mlx5_flow_spec *tmp_spec;

		tmp_spec = kvzalloc(sizeof(*tmp_spec), GFP_KERNEL);
		if (!tmp_spec) {
			NL_SET_ERR_MSG_MOD(extack, "Failed to allocate memory for vxlan tmp spec");
			netdev_warn(priv->netdev, "Failed to allocate memory for vxlan tmp spec");
			return -ENOMEM;
		}
		memcpy(tmp_spec, spec, sizeof(*tmp_spec));

		err = mlx5e_tc_tun_parse(filter_dev, priv, tmp_spec, f, match_level);
		if (err) {
			kvfree(tmp_spec);
			NL_SET_ERR_MSG_MOD(extack, "Failed to parse tunnel attributes");
			netdev_warn(priv->netdev, "Failed to parse tunnel attributes");
			return err;
		}
		err = mlx5e_tc_set_attr_rx_tun(flow, tmp_spec);
		kvfree(tmp_spec);
		if (err)
			return err;
	}

	if (!needs_mapping && !sets_mapping)
		return 0;

	return mlx5e_get_flow_tunnel_id(priv, flow, f, filter_dev);
}

static void *get_match_inner_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    inner_headers);
}

static void *get_match_inner_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    inner_headers);
}

static void *get_match_outer_headers_criteria(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
			    outer_headers);
}

static void *get_match_outer_headers_value(struct mlx5_flow_spec *spec)
{
	return MLX5_ADDR_OF(fte_match_param, spec->match_value,
			    outer_headers);
}

static void *get_match_headers_value(u32 flags,
				     struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_value(spec) :
		get_match_outer_headers_value(spec);
}

static void *get_match_headers_criteria(u32 flags,
					struct mlx5_flow_spec *spec)
{
	return (flags & MLX5_FLOW_CONTEXT_ACTION_DECAP) ?
		get_match_inner_headers_criteria(spec) :
		get_match_outer_headers_criteria(spec);
}

static int mlx5e_flower_parse_meta(struct net_device *filter_dev,
				   struct flow_cls_offload *f)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct net_device *ingress_dev;
	struct flow_match_meta match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_META))
		return 0;

	flow_rule_match_meta(rule, &match);
	if (match.mask->ingress_ifindex != 0xFFFFFFFF) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported ingress ifindex mask");
		return -EOPNOTSUPP;
	}

	ingress_dev = __dev_get_by_index(dev_net(filter_dev),
					 match.key->ingress_ifindex);
	if (!ingress_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't find the ingress port to match on");
		return -ENOENT;
	}

	if (ingress_dev != filter_dev) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't match on the ingress filter port");
		return -EOPNOTSUPP;
	}

	return 0;
}

static bool skip_key_basic(struct net_device *filter_dev,
			   struct flow_cls_offload *f)
{
	/* When doing mpls over udp decap, the user needs to provide
	 * MPLS_UC as the protocol in order to be able to match on mpls
	 * label fields.  However, the actual ethertype is IP so we want to
	 * avoid matching on this, otherwise we'll fail the match.
	 */
	if (netif_is_bareudp(filter_dev) && f->common.chain_index == 0)
		return true;

	return false;
}
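
/* Hypothetical example of such a filter (illustration only; device and
 * action names are placeholders):
 *
 *   tc filter add dev bareudp0 ingress protocol mpls_uc flower \
 *       mpls_label 100 action mirred egress redirect dev $VF_REP
 *
 * The mpls_uc ethertype is only there to unlock the mpls_label match;
 * the check above keeps it out of the hardware match.
 */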

static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct flow_cls_offload *f,
			      struct net_device *filter_dev,
			      u8 *inner_match_level, u8 *outer_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				      misc_parameters_3);
	void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				      misc_parameters_3);
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;
	u8 *match_level;
	int err;

	match_level = outer_match_level;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_META) |
	      BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_CT) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
	      BIT(FLOW_DISSECTOR_KEY_ICMP) |
	      BIT(FLOW_DISSECTOR_KEY_MPLS))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
			   dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (mlx5e_get_tc_tun(filter_dev)) {
		bool match_inner = false;

		err = parse_tunnel_attr(priv, flow, spec, f, filter_dev,
					outer_match_level, &match_inner);
		if (err)
			return err;

		if (match_inner) {
			/* header pointers should point to the inner headers
			 * if the packet was decapsulated already.
			 * outer headers are set by parse_tunnel_attr.
			 */
			match_level = inner_match_level;
			headers_c = get_match_inner_headers_criteria(spec);
			headers_v = get_match_inner_headers_value(spec);
		}
	}

	err = mlx5e_flower_parse_meta(filter_dev, f);
	if (err)
		return err;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC) &&
	    !skip_key_basic(filter_dev, f)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		mlx5e_tc_set_ethertype(priv->mdev, &match,
				       match_level == outer_match_level,
				       headers_c, headers_v);

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN) ||
	    is_vlan_dev(filter_dev)) {
		struct flow_dissector_key_vlan filter_dev_mask;
		struct flow_dissector_key_vlan filter_dev_key;
		struct flow_match_vlan match;

		if (is_vlan_dev(filter_dev)) {
			match.key = &filter_dev_key;
			match.key->vlan_id = vlan_dev_vlan_id(filter_dev);
			match.key->vlan_tpid = vlan_dev_vlan_proto(filter_dev);
			match.key->vlan_priority = 0;
			match.mask = &filter_dev_mask;
			memset(match.mask, 0xff, sizeof(*match.mask));
			match.mask->vlan_priority = 0;
		} else {
			flow_rule_match_vlan(rule, &match);
		}
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		/* cvlan_tag enabled in match criteria and
		 * disabled in match value means both S & C tags
		 * don't exist (untagged for both)
		 */
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_cvlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
			spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}
2182 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2183 struct flow_match_control match;
2185 flow_rule_match_control(rule, &match);
2186 addr_type = match.key->addr_type;
2188 /* the HW doesn't support frag first/later */
2189 if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
2192 if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
2193 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
2194 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
2195 match.key->flags & FLOW_DIS_IS_FRAGMENT);
2197 /* the HW doesn't need L3 inline to match on frag=no */
2198 if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
2199 *match_level = MLX5_MATCH_L2;
2200 /* *** L2 attributes parsing up to here *** */
2202 *match_level = MLX5_MATCH_L3;
2206 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2207 struct flow_match_basic match;
2209 flow_rule_match_basic(rule, &match);
2210 ip_proto = match.key->ip_proto;
2212 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
2213 match.mask->ip_proto);
2214 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2215 match.key->ip_proto);
2217 if (match.mask->ip_proto)
2218 *match_level = MLX5_MATCH_L3;
2221 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2222 struct flow_match_ipv4_addrs match;
2224 flow_rule_match_ipv4_addrs(rule, &match);
2225 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2226 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2227 &match.mask->src, sizeof(match.mask->src));
2228 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2229 src_ipv4_src_ipv6.ipv4_layout.ipv4),
2230 &match.key->src, sizeof(match.key->src));
2231 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2232 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2233 &match.mask->dst, sizeof(match.mask->dst));
2234 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2235 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2236 &match.key->dst, sizeof(match.key->dst));
2238 if (match.mask->src || match.mask->dst)
2239 *match_level = MLX5_MATCH_L3;
2242 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2243 struct flow_match_ipv6_addrs match;
2245 flow_rule_match_ipv6_addrs(rule, &match);
2246 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2247 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2248 &match.mask->src, sizeof(match.mask->src));
2249 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2250 src_ipv4_src_ipv6.ipv6_layout.ipv6),
2251 &match.key->src, sizeof(match.key->src));
2253 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
2254 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2255 &match.mask->dst, sizeof(match.mask->dst));
2256 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2257 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
2258 &match.key->dst, sizeof(match.key->dst));
2260 if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
2261 ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
2262 *match_level = MLX5_MATCH_L3;
2265 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
2266 struct flow_match_ip match;
2268 flow_rule_match_ip(rule, &match);
2269 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
2270 match.mask->tos & 0x3);
2271 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
2272 match.key->tos & 0x3);
2274 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
2275 match.mask->tos >> 2);
2276 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
2277 match.key->tos >> 2);
2279 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
2281 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
2284 if (match.mask->ttl &&
2285 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
2286 ft_field_support.outer_ipv4_ttl)) {
2287 NL_SET_ERR_MSG_MOD(extack,
2288 "Matching on TTL is not supported");
2292 if (match.mask->tos || match.mask->ttl)
2293 *match_level = MLX5_MATCH_L3;
2296 /* *** L3 attributes parsing up to here *** */
2298 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2299 struct flow_match_ports match;
2301 flow_rule_match_ports(rule, &match);
2304 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2305 tcp_sport, ntohs(match.mask->src));
2306 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2307 tcp_sport, ntohs(match.key->src));
2309 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2310 tcp_dport, ntohs(match.mask->dst));
2311 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2312 tcp_dport, ntohs(match.key->dst));
2316 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2317 udp_sport, ntohs(match.mask->src));
2318 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2319 udp_sport, ntohs(match.key->src));
2321 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
2322 udp_dport, ntohs(match.mask->dst));
2323 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
2324 udp_dport, ntohs(match.key->dst));
2327 NL_SET_ERR_MSG_MOD(extack,
2328 "Only UDP and TCP transports are supported for L4 matching");
2329 netdev_err(priv->netdev,
2330 "Only UDP and TCP transport are supported\n");
2334 if (match.mask->src || match.mask->dst)
2335 *match_level = MLX5_MATCH_L4;
2338 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
2339 struct flow_match_tcp match;
2341 flow_rule_match_tcp(rule, &match);
2342 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
2343 ntohs(match.mask->flags));
2344 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
2345 ntohs(match.key->flags));
2347 if (match.mask->flags)
2348 *match_level = MLX5_MATCH_L4;
2350 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
2351 struct flow_match_icmp match;
2353 flow_rule_match_icmp(rule, &match);
2356 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2357 MLX5_FLEX_PROTO_ICMP))
2359 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
2361 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
2363 MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
2365 MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
2368 case IPPROTO_ICMPV6:
2369 if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
2370 MLX5_FLEX_PROTO_ICMPV6))
2372 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
2374 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
2376 MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
2378 MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
2382 NL_SET_ERR_MSG_MOD(extack,
2383 "Code and type matching only with ICMP and ICMPv6");
2384 netdev_err(priv->netdev,
2385 "Code and type matching only with ICMP and ICMPv6\n");
2388 if (match.mask->code || match.mask->type) {
2389 *match_level = MLX5_MATCH_L4;
2390 spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
2393 /* Currenlty supported only for MPLS over UDP */
2394 if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
2395 !netif_is_bareudp(filter_dev)) {
2396 NL_SET_ERR_MSG_MOD(extack,
2397 "Matching on MPLS is supported only for MPLS over UDP");
2398 netdev_err(priv->netdev,
2399 "Matching on MPLS is supported only for MPLS over UDP\n");
2406 static int parse_cls_flower(struct mlx5e_priv *priv,
2407 struct mlx5e_tc_flow *flow,
2408 struct mlx5_flow_spec *spec,
2409 struct flow_cls_offload *f,
2410 struct net_device *filter_dev)
2412 u8 inner_match_level, outer_match_level, non_tunnel_match_level;
2413 struct netlink_ext_ack *extack = f->common.extack;
2414 struct mlx5_core_dev *dev = priv->mdev;
2415 struct mlx5_eswitch *esw = dev->priv.eswitch;
2416 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2417 struct mlx5_eswitch_rep *rep;
2418 bool is_eswitch_flow;
2421 inner_match_level = MLX5_MATCH_NONE;
2422 outer_match_level = MLX5_MATCH_NONE;
2424 err = __parse_cls_flower(priv, flow, spec, f, filter_dev,
2425 &inner_match_level, &outer_match_level);
2426 non_tunnel_match_level = (inner_match_level == MLX5_MATCH_NONE) ?
2427 outer_match_level : inner_match_level;
2429 is_eswitch_flow = mlx5e_is_eswitch_flow(flow);
2430 if (!err && is_eswitch_flow) {
2432 if (rep->vport != MLX5_VPORT_UPLINK &&
2433 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
2434 esw->offloads.inline_mode < non_tunnel_match_level)) {
2435 NL_SET_ERR_MSG_MOD(extack,
2436 "Flow is not offloaded due to min inline setting");
2437 netdev_warn(priv->netdev,
2438 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
2439 non_tunnel_match_level, esw->offloads.inline_mode);
2444 flow->attr->inner_match_level = inner_match_level;
2445 flow->attr->outer_match_level = outer_match_level;
2451 struct pedit_headers {
2453 struct vlan_hdr vlan;
2460 struct pedit_headers_action {
2461 struct pedit_headers vals;
2462 struct pedit_headers masks;
2466 static int pedit_header_offsets[] = {
2467 [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
2468 [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
2469 [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
2470 [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
2471 [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
2474 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
2476 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
2477 struct pedit_headers_action *hdrs)
2479 u32 *curr_pmask, *curr_pval;
2481 curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
2482 curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);
2484 if (*curr_pmask & mask) /* disallow acting twice on the same location */
2487 *curr_pmask |= mask;
2488 *curr_pval |= (val & mask);
2496 struct mlx5_fields {
2504 #define OFFLOAD(fw_field, field_bsize, field_mask, field, off, match_field) \
2505 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, field_bsize, field_mask, \
2506 offsetof(struct pedit_headers, field) + (off), \
2507 MLX5_BYTE_OFF(fte_match_set_lyr_2_4, match_field)}
2509 /* masked values are the same and there are no rewrites that do not have a
2512 #define SAME_VAL_MASK(type, valp, maskp, matchvalp, matchmaskp) ({ \
2513 type matchmaskx = *(type *)(matchmaskp); \
2514 type matchvalx = *(type *)(matchvalp); \
2515 type maskx = *(type *)(maskp); \
2516 type valx = *(type *)(valp); \
2518 (valx & maskx) == (matchvalx & matchmaskx) && !(maskx & (maskx ^ \
2522 static bool cmp_val_mask(void *valp, void *maskp, void *matchvalp,
2523 void *matchmaskp, u8 bsize)
2529 same = SAME_VAL_MASK(u8, valp, maskp, matchvalp, matchmaskp);
2532 same = SAME_VAL_MASK(u16, valp, maskp, matchvalp, matchmaskp);
2535 same = SAME_VAL_MASK(u32, valp, maskp, matchvalp, matchmaskp);
2542 static struct mlx5_fields fields[] = {
2543 OFFLOAD(DMAC_47_16, 32, U32_MAX, eth.h_dest[0], 0, dmac_47_16),
2544 OFFLOAD(DMAC_15_0, 16, U16_MAX, eth.h_dest[4], 0, dmac_15_0),
2545 OFFLOAD(SMAC_47_16, 32, U32_MAX, eth.h_source[0], 0, smac_47_16),
2546 OFFLOAD(SMAC_15_0, 16, U16_MAX, eth.h_source[4], 0, smac_15_0),
2547 OFFLOAD(ETHERTYPE, 16, U16_MAX, eth.h_proto, 0, ethertype),
2548 OFFLOAD(FIRST_VID, 16, U16_MAX, vlan.h_vlan_TCI, 0, first_vid),
2550 OFFLOAD(IP_DSCP, 8, 0xfc, ip4.tos, 0, ip_dscp),
2551 OFFLOAD(IP_TTL, 8, U8_MAX, ip4.ttl, 0, ttl_hoplimit),
2552 OFFLOAD(SIPV4, 32, U32_MAX, ip4.saddr, 0, src_ipv4_src_ipv6.ipv4_layout.ipv4),
2553 OFFLOAD(DIPV4, 32, U32_MAX, ip4.daddr, 0, dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
2555 OFFLOAD(SIPV6_127_96, 32, U32_MAX, ip6.saddr.s6_addr32[0], 0,
2556 src_ipv4_src_ipv6.ipv6_layout.ipv6[0]),
2557 OFFLOAD(SIPV6_95_64, 32, U32_MAX, ip6.saddr.s6_addr32[1], 0,
2558 src_ipv4_src_ipv6.ipv6_layout.ipv6[4]),
2559 OFFLOAD(SIPV6_63_32, 32, U32_MAX, ip6.saddr.s6_addr32[2], 0,
2560 src_ipv4_src_ipv6.ipv6_layout.ipv6[8]),
2561 OFFLOAD(SIPV6_31_0, 32, U32_MAX, ip6.saddr.s6_addr32[3], 0,
2562 src_ipv4_src_ipv6.ipv6_layout.ipv6[12]),
2563 OFFLOAD(DIPV6_127_96, 32, U32_MAX, ip6.daddr.s6_addr32[0], 0,
2564 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[0]),
2565 OFFLOAD(DIPV6_95_64, 32, U32_MAX, ip6.daddr.s6_addr32[1], 0,
2566 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[4]),
2567 OFFLOAD(DIPV6_63_32, 32, U32_MAX, ip6.daddr.s6_addr32[2], 0,
2568 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[8]),
2569 OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
2570 dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
2571 OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
2572 OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
2574 OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
2575 OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
2576 /* in linux iphdr tcp_flags is 8 bits long */
2577 OFFLOAD(TCP_FLAGS, 8, U8_MAX, tcp.ack_seq, 5, tcp_flags),
2579 OFFLOAD(UDP_SPORT, 16, U16_MAX, udp.source, 0, udp_sport),
2580 OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
2583 static unsigned long mask_to_le(unsigned long mask, int size)
2589 mask_be32 = (__force __be32)(mask);
2590 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
2591 } else if (size == 16) {
2592 mask_be32 = (__force __be32)(mask);
2593 mask_be16 = *(__be16 *)&mask_be32;
2594 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
2599 static int offload_pedit_fields(struct mlx5e_priv *priv,
2601 struct pedit_headers_action *hdrs,
2602 struct mlx5e_tc_flow_parse_attr *parse_attr,
2604 struct netlink_ext_ack *extack)
2606 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
2607 int i, action_size, first, last, next_z;
2608 void *headers_c, *headers_v, *action, *vals_p;
2609 u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
2610 struct mlx5e_tc_mod_hdr_acts *mod_acts;
2611 struct mlx5_fields *f;
2612 unsigned long mask, field_mask;
2616 mod_acts = &parse_attr->mod_hdr_acts;
2617 headers_c = get_match_headers_criteria(*action_flags, &parse_attr->spec);
2618 headers_v = get_match_headers_value(*action_flags, &parse_attr->spec);
2620 set_masks = &hdrs[0].masks;
2621 add_masks = &hdrs[1].masks;
2622 set_vals = &hdrs[0].vals;
2623 add_vals = &hdrs[1].vals;
2625 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2627 for (i = 0; i < ARRAY_SIZE(fields); i++) {
2631 /* avoid seeing bits set from previous iterations */
2635 s_masks_p = (void *)set_masks + f->offset;
2636 a_masks_p = (void *)add_masks + f->offset;
2638 s_mask = *s_masks_p & f->field_mask;
2639 a_mask = *a_masks_p & f->field_mask;
2641 if (!s_mask && !a_mask) /* nothing to offload here */
2644 if (s_mask && a_mask) {
2645 NL_SET_ERR_MSG_MOD(extack,
2646 "can't set and add to the same HW field");
2647 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
2653 void *match_mask = headers_c + f->match_offset;
2654 void *match_val = headers_v + f->match_offset;
2656 cmd = MLX5_ACTION_TYPE_SET;
2658 vals_p = (void *)set_vals + f->offset;
2659 /* don't rewrite if we have a match on the same value */
2660 if (cmp_val_mask(vals_p, s_masks_p, match_val,
2661 match_mask, f->field_bsize))
2663 /* clear to denote we consumed this field */
2664 *s_masks_p &= ~f->field_mask;
2666 cmd = MLX5_ACTION_TYPE_ADD;
2668 vals_p = (void *)add_vals + f->offset;
2669 /* add 0 is no change */
2670 if ((*(u32 *)vals_p & f->field_mask) == 0)
2672 /* clear to denote we consumed this field */
2673 *a_masks_p &= ~f->field_mask;
2678 mask = mask_to_le(mask, f->field_bsize);
2680 first = find_first_bit(&mask, f->field_bsize);
2681 next_z = find_next_zero_bit(&mask, f->field_bsize, first);
2682 last = find_last_bit(&mask, f->field_bsize);
2683 if (first < next_z && next_z < last) {
2684 NL_SET_ERR_MSG_MOD(extack,
2685 "rewrite of few sub-fields isn't supported");
2686 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
2691 err = alloc_mod_hdr_actions(priv->mdev, namespace, mod_acts);
2693 NL_SET_ERR_MSG_MOD(extack,
2694 "too many pedit actions, can't offload");
2695 mlx5_core_warn(priv->mdev,
2696 "mlx5: parsed %d pedit actions, can't do more\n",
2697 mod_acts->num_actions);
2701 action = mod_acts->actions +
2702 (mod_acts->num_actions * action_size);
2703 MLX5_SET(set_action_in, action, action_type, cmd);
2704 MLX5_SET(set_action_in, action, field, f->field);
2706 if (cmd == MLX5_ACTION_TYPE_SET) {
2709 field_mask = mask_to_le(f->field_mask, f->field_bsize);
2711 /* if field is bit sized it can start not from first bit */
2712 start = find_first_bit(&field_mask, f->field_bsize);
2714 MLX5_SET(set_action_in, action, offset, first - start);
2715 /* length is num of bits to be written, zero means length of 32 */
2716 MLX5_SET(set_action_in, action, length, (last - first + 1));
2719 if (f->field_bsize == 32)
2720 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
2721 else if (f->field_bsize == 16)
2722 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
2723 else if (f->field_bsize == 8)
2724 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
2726 ++mod_acts->num_actions;
2732 static int mlx5e_flow_namespace_max_modify_action(struct mlx5_core_dev *mdev,
2735 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
2736 return MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, max_modify_header_actions);
2737 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
2738 return MLX5_CAP_FLOWTABLE_NIC_RX(mdev, max_modify_header_actions);
2741 int alloc_mod_hdr_actions(struct mlx5_core_dev *mdev,
2743 struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2745 int action_size, new_num_actions, max_hw_actions;
2746 size_t new_sz, old_sz;
2749 if (mod_hdr_acts->num_actions < mod_hdr_acts->max_actions)
2752 action_size = MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto);
2754 max_hw_actions = mlx5e_flow_namespace_max_modify_action(mdev,
2756 new_num_actions = min(max_hw_actions,
2757 mod_hdr_acts->actions ?
2758 mod_hdr_acts->max_actions * 2 : 1);
2759 if (mod_hdr_acts->max_actions == new_num_actions)
2762 new_sz = action_size * new_num_actions;
2763 old_sz = mod_hdr_acts->max_actions * action_size;
2764 ret = krealloc(mod_hdr_acts->actions, new_sz, GFP_KERNEL);
2768 memset(ret + old_sz, 0, new_sz - old_sz);
2769 mod_hdr_acts->actions = ret;
2770 mod_hdr_acts->max_actions = new_num_actions;
2775 void dealloc_mod_hdr_actions(struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts)
2777 kfree(mod_hdr_acts->actions);
2778 mod_hdr_acts->actions = NULL;
2779 mod_hdr_acts->num_actions = 0;
2780 mod_hdr_acts->max_actions = 0;
2783 static const struct pedit_headers zero_masks = {};
2786 parse_pedit_to_modify_hdr(struct mlx5e_priv *priv,
2787 const struct flow_action_entry *act, int namespace,
2788 struct mlx5e_tc_flow_parse_attr *parse_attr,
2789 struct pedit_headers_action *hdrs,
2790 struct netlink_ext_ack *extack)
2792 u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
2793 int err = -EOPNOTSUPP;
2794 u32 mask, val, offset;
2797 htype = act->mangle.htype;
2798 err = -EOPNOTSUPP; /* can't be all optimistic */
2800 if (htype == FLOW_ACT_MANGLE_UNSPEC) {
2801 NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
2805 if (!mlx5e_flow_namespace_max_modify_action(priv->mdev, namespace)) {
2806 NL_SET_ERR_MSG_MOD(extack,
2807 "The pedit offload action is not supported");
2811 mask = act->mangle.mask;
2812 val = act->mangle.val;
2813 offset = act->mangle.offset;
2815 err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
2827 parse_pedit_to_reformat(struct mlx5e_priv *priv,
2828 const struct flow_action_entry *act,
2829 struct mlx5e_tc_flow_parse_attr *parse_attr,
2830 struct netlink_ext_ack *extack)
2832 u32 mask, val, offset;
2835 if (act->id != FLOW_ACTION_MANGLE)
2838 if (act->mangle.htype != FLOW_ACT_MANGLE_HDR_TYPE_ETH) {
2839 NL_SET_ERR_MSG_MOD(extack, "Only Ethernet modification is supported");
2843 mask = ~act->mangle.mask;
2844 val = act->mangle.val;
2845 offset = act->mangle.offset;
2846 p = (u32 *)&parse_attr->eth;
2847 *(p + (offset >> 2)) |= (val & mask);
2852 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
2853 const struct flow_action_entry *act, int namespace,
2854 struct mlx5e_tc_flow_parse_attr *parse_attr,
2855 struct pedit_headers_action *hdrs,
2856 struct mlx5e_tc_flow *flow,
2857 struct netlink_ext_ack *extack)
2859 if (flow && flow_flag_test(flow, L3_TO_L2_DECAP))
2860 return parse_pedit_to_reformat(priv, act, parse_attr, extack);
2862 return parse_pedit_to_modify_hdr(priv, act, namespace,
2863 parse_attr, hdrs, extack);
2866 static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
2867 struct mlx5e_tc_flow_parse_attr *parse_attr,
2868 struct pedit_headers_action *hdrs,
2870 struct netlink_ext_ack *extack)
2872 struct pedit_headers *cmd_masks;
2876 err = offload_pedit_fields(priv, namespace, hdrs, parse_attr,
2877 action_flags, extack);
2879 goto out_dealloc_parsed_actions;
2881 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
2882 cmd_masks = &hdrs[cmd].masks;
2883 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
2884 NL_SET_ERR_MSG_MOD(extack,
2885 "attempt to offload an unsupported field");
2886 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
2887 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
2888 16, 1, cmd_masks, sizeof(zero_masks), true);
2890 goto out_dealloc_parsed_actions;
2896 out_dealloc_parsed_actions:
2897 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
2901 static bool csum_offload_supported(struct mlx5e_priv *priv,
2904 struct netlink_ext_ack *extack)
2906 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
2907 TCA_CSUM_UPDATE_FLAG_UDP;
2909 /* The HW recalcs checksums only if re-writing headers */
2910 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
2911 NL_SET_ERR_MSG_MOD(extack,
2912 "TC csum action is only offloaded with pedit");
2913 netdev_warn(priv->netdev,
2914 "TC csum action is only offloaded with pedit\n");
2918 if (update_flags & ~prot_flags) {
2919 NL_SET_ERR_MSG_MOD(extack,
2920 "can't offload TC csum action for some header/s");
2921 netdev_warn(priv->netdev,
2922 "can't offload TC csum action for some header/s - flags %#x\n",
2930 struct ip_ttl_word {
2936 struct ipv6_hoplimit_word {
2942 static int is_action_keys_supported(const struct flow_action_entry *act,
2943 bool ct_flow, bool *modify_ip_header,
2945 struct netlink_ext_ack *extack)
2950 htype = act->mangle.htype;
2951 offset = act->mangle.offset;
2952 mask = ~act->mangle.mask;
2953 /* For IPv4 & IPv6 header check 4 byte word,
2954 * to determine that modified fields
2955 * are NOT ttl & hop_limit only.
2957 if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
2958 struct ip_ttl_word *ttl_word =
2959 (struct ip_ttl_word *)&mask;
2961 if (offset != offsetof(struct iphdr, ttl) ||
2962 ttl_word->protocol ||
2964 *modify_ip_header = true;
2967 if (offset >= offsetof(struct iphdr, saddr))
2968 *modify_tuple = true;
2970 if (ct_flow && *modify_tuple) {
2971 NL_SET_ERR_MSG_MOD(extack,
2972 "can't offload re-write of ipv4 address with action ct");
2975 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
2976 struct ipv6_hoplimit_word *hoplimit_word =
2977 (struct ipv6_hoplimit_word *)&mask;
2979 if (offset != offsetof(struct ipv6hdr, payload_len) ||
2980 hoplimit_word->payload_len ||
2981 hoplimit_word->nexthdr) {
2982 *modify_ip_header = true;
2985 if (ct_flow && offset >= offsetof(struct ipv6hdr, saddr))
2986 *modify_tuple = true;
2988 if (ct_flow && *modify_tuple) {
2989 NL_SET_ERR_MSG_MOD(extack,
2990 "can't offload re-write of ipv6 address with action ct");
2993 } else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_TCP ||
2994 htype == FLOW_ACT_MANGLE_HDR_TYPE_UDP) {
2995 *modify_tuple = true;
2997 NL_SET_ERR_MSG_MOD(extack,
2998 "can't offload re-write of transport header ports with action ct");
3006 static bool modify_tuple_supported(bool modify_tuple, bool ct_clear,
3007 bool ct_flow, struct netlink_ext_ack *extack,
3008 struct mlx5e_priv *priv,
3009 struct mlx5_flow_spec *spec)
3011 if (!modify_tuple || ct_clear)
3015 NL_SET_ERR_MSG_MOD(extack,
3016 "can't offload tuple modification with non-clear ct()");
3017 netdev_info(priv->netdev,
3018 "can't offload tuple modification with non-clear ct()");
3022 /* Add ct_state=-trk match so it will be offloaded for non ct flows
3023 * (or after clear action), as otherwise, since the tuple is changed,
3024 * we can't restore ct state
3026 if (mlx5_tc_ct_add_no_trk_match(spec)) {
3027 NL_SET_ERR_MSG_MOD(extack,
3028 "can't offload tuple modification with ct matches and no ct(clear) action");
3029 netdev_info(priv->netdev,
3030 "can't offload tuple modification with ct matches and no ct(clear) action");
3037 static bool modify_header_match_supported(struct mlx5e_priv *priv,
3038 struct mlx5_flow_spec *spec,
3039 struct flow_action *flow_action,
3040 u32 actions, bool ct_flow,
3042 struct netlink_ext_ack *extack)
3044 const struct flow_action_entry *act;
3045 bool modify_ip_header, modify_tuple;
3052 headers_c = get_match_headers_criteria(actions, spec);
3053 headers_v = get_match_headers_value(actions, spec);
3054 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
3056 /* for non-IP we only re-write MACs, so we're okay */
3057 if (MLX5_GET(fte_match_set_lyr_2_4, headers_c, ip_version) == 0 &&
3058 ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
3061 modify_ip_header = false;
3062 modify_tuple = false;
3063 flow_action_for_each(i, act, flow_action) {
3064 if (act->id != FLOW_ACTION_MANGLE &&
3065 act->id != FLOW_ACTION_ADD)
3068 err = is_action_keys_supported(act, ct_flow,
3070 &modify_tuple, extack);
3075 if (!modify_tuple_supported(modify_tuple, ct_clear, ct_flow, extack,
3079 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
3080 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
3081 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
3082 NL_SET_ERR_MSG_MOD(extack,
3083 "can't offload re-write of non TCP/UDP");
3084 netdev_info(priv->netdev, "can't offload re-write of ip proto %d\n",
3093 static bool actions_match_supported(struct mlx5e_priv *priv,
3094 struct flow_action *flow_action,
3095 struct mlx5e_tc_flow_parse_attr *parse_attr,
3096 struct mlx5e_tc_flow *flow,
3097 struct netlink_ext_ack *extack)
3099 bool ct_flow = false, ct_clear = false;
3102 ct_clear = flow->attr->ct_attr.ct_action &
3104 ct_flow = flow_flag_test(flow, CT) && !ct_clear;
3105 actions = flow->attr->action;
3107 if (mlx5e_is_eswitch_flow(flow)) {
3108 if (flow->attr->esw_attr->split_count && ct_flow &&
3109 !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
3110 /* All registers used by ct are cleared when using
3113 NL_SET_ERR_MSG_MOD(extack,
3114 "Can't offload mirroring with action ct");
3119 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3120 return modify_header_match_supported(priv, &parse_attr->spec,
3121 flow_action, actions,
3128 static bool same_port_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3130 return priv->mdev == peer_priv->mdev;
3133 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
3135 struct mlx5_core_dev *fmdev, *pmdev;
3136 u64 fsystem_guid, psystem_guid;
3139 pmdev = peer_priv->mdev;
3141 fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
3142 psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
3144 return (fsystem_guid == psystem_guid);
3147 static bool same_vf_reps(struct mlx5e_priv *priv,
3148 struct net_device *out_dev)
3150 return mlx5e_eswitch_vf_rep(priv->netdev) &&
3151 priv->netdev == out_dev;
3154 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
3155 const struct flow_action_entry *act,
3156 struct mlx5e_tc_flow_parse_attr *parse_attr,
3157 struct pedit_headers_action *hdrs,
3158 u32 *action, struct netlink_ext_ack *extack)
3160 u16 mask16 = VLAN_VID_MASK;
3161 u16 val16 = act->vlan.vid & VLAN_VID_MASK;
3162 const struct flow_action_entry pedit_act = {
3163 .id = FLOW_ACTION_MANGLE,
3164 .mangle.htype = FLOW_ACT_MANGLE_HDR_TYPE_ETH,
3165 .mangle.offset = offsetof(struct vlan_ethhdr, h_vlan_TCI),
3166 .mangle.mask = ~(u32)be16_to_cpu(*(__be16 *)&mask16),
3167 .mangle.val = (u32)be16_to_cpu(*(__be16 *)&val16),
3169 u8 match_prio_mask, match_prio_val;
3170 void *headers_c, *headers_v;
3173 headers_c = get_match_headers_criteria(*action, &parse_attr->spec);
3174 headers_v = get_match_headers_value(*action, &parse_attr->spec);
3176 if (!(MLX5_GET(fte_match_set_lyr_2_4, headers_c, cvlan_tag) &&
3177 MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag))) {
3178 NL_SET_ERR_MSG_MOD(extack,
3179 "VLAN rewrite action must have VLAN protocol match");
3183 match_prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
3184 match_prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
3185 if (act->vlan.prio != (match_prio_val & match_prio_mask)) {
3186 NL_SET_ERR_MSG_MOD(extack,
3187 "Changing VLAN prio is not supported");
3191 err = parse_tc_pedit_action(priv, &pedit_act, namespace, parse_attr, hdrs, NULL, extack);
3192 *action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3198 add_vlan_prio_tag_rewrite_action(struct mlx5e_priv *priv,
3199 struct mlx5e_tc_flow_parse_attr *parse_attr,
3200 struct pedit_headers_action *hdrs,
3201 u32 *action, struct netlink_ext_ack *extack)
3203 const struct flow_action_entry prio_tag_act = {
3206 MLX5_GET(fte_match_set_lyr_2_4,
3207 get_match_headers_value(*action,
3210 MLX5_GET(fte_match_set_lyr_2_4,
3211 get_match_headers_criteria(*action,
3216 return add_vlan_rewrite_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3217 &prio_tag_act, parse_attr, hdrs, action,
3221 static int validate_goto_chain(struct mlx5e_priv *priv,
3222 struct mlx5e_tc_flow *flow,
3223 const struct flow_action_entry *act,
3225 struct netlink_ext_ack *extack)
3227 bool is_esw = mlx5e_is_eswitch_flow(flow);
3228 struct mlx5_flow_attr *attr = flow->attr;
3229 bool ft_flow = mlx5e_is_ft_flow(flow);
3230 u32 dest_chain = act->chain_index;
3231 struct mlx5_fs_chains *chains;
3232 struct mlx5_eswitch *esw;
3233 u32 reformat_and_fwd;
3236 esw = priv->mdev->priv.eswitch;
3237 chains = is_esw ? esw_chains(esw) : nic_chains(priv);
3238 max_chain = mlx5_chains_get_chain_range(chains);
3239 reformat_and_fwd = is_esw ?
3240 MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, reformat_and_fwd_to_table) :
3241 MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, reformat_and_fwd_to_table);
3244 NL_SET_ERR_MSG_MOD(extack, "Goto action is not supported");
3248 if (!mlx5_chains_backwards_supported(chains) &&
3249 dest_chain <= attr->chain) {
3250 NL_SET_ERR_MSG_MOD(extack,
3251 "Goto lower numbered chain isn't supported");
3255 if (dest_chain > max_chain) {
3256 NL_SET_ERR_MSG_MOD(extack,
3257 "Requested destination chain is out of supported range");
3261 if (actions & (MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
3262 MLX5_FLOW_CONTEXT_ACTION_DECAP) &&
3263 !reformat_and_fwd) {
3264 NL_SET_ERR_MSG_MOD(extack,
3265 "Goto chain is not allowed if action has reformat or decap");
3272 static int parse_tc_nic_actions(struct mlx5e_priv *priv,
3273 struct flow_action *flow_action,
3274 struct mlx5e_tc_flow_parse_attr *parse_attr,
3275 struct mlx5e_tc_flow *flow,
3276 struct netlink_ext_ack *extack)
3278 struct mlx5_flow_attr *attr = flow->attr;
3279 struct pedit_headers_action hdrs[2] = {};
3280 const struct flow_action_entry *act;
3281 struct mlx5_nic_flow_attr *nic_attr;
3285 if (!flow_action_has_entries(flow_action))
3288 if (!flow_action_hw_stats_check(flow_action, extack,
3289 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3292 nic_attr = attr->nic_attr;
3294 nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
3296 flow_action_for_each(i, act, flow_action) {
3298 case FLOW_ACTION_ACCEPT:
3299 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3300 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3302 case FLOW_ACTION_DROP:
3303 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
3304 if (MLX5_CAP_FLOWTABLE(priv->mdev,
3305 flow_table_properties_nic_receive.flow_counter))
3306 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3308 case FLOW_ACTION_MANGLE:
3309 case FLOW_ACTION_ADD:
3310 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
3311 parse_attr, hdrs, NULL, extack);
3315 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3317 case FLOW_ACTION_VLAN_MANGLE:
3318 err = add_vlan_rewrite_action(priv,
3319 MLX5_FLOW_NAMESPACE_KERNEL,
3320 act, parse_attr, hdrs,
3326 case FLOW_ACTION_CSUM:
3327 if (csum_offload_supported(priv, action,
3333 case FLOW_ACTION_REDIRECT: {
3334 struct net_device *peer_dev = act->dev;
3336 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
3337 same_hw_devs(priv, netdev_priv(peer_dev))) {
3338 parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
3339 flow_flag_set(flow, HAIRPIN);
3340 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3341 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3343 NL_SET_ERR_MSG_MOD(extack,
3344 "device is not on same HW, can't offload");
3345 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
3351 case FLOW_ACTION_MARK: {
3352 u32 mark = act->mark;
3354 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
3355 NL_SET_ERR_MSG_MOD(extack,
3356 "Bad flow mark - only 16 bit is supported");
3360 nic_attr->flow_tag = mark;
3361 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3364 case FLOW_ACTION_GOTO:
3365 err = validate_goto_chain(priv, flow, act, action,
3370 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3371 attr->dest_chain = act->chain_index;
3373 case FLOW_ACTION_CT:
3374 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
3378 flow_flag_set(flow, CT);
3381 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3386 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3387 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3388 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
3389 parse_attr, hdrs, &action, extack);
3392 /* in case all pedit actions are skipped, remove the MOD_HDR
3395 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3396 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3397 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3401 attr->action = action;
3403 if (attr->dest_chain) {
3404 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
3405 NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
3408 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3411 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
3412 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
3414 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3420 static bool is_merged_eswitch_vfs(struct mlx5e_priv *priv,
3421 struct net_device *peer_netdev)
3423 struct mlx5e_priv *peer_priv;
3425 peer_priv = netdev_priv(peer_netdev);
3427 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
3428 mlx5e_eswitch_vf_rep(priv->netdev) &&
3429 mlx5e_eswitch_vf_rep(peer_netdev) &&
3430 same_hw_devs(priv, peer_priv));
3433 static int parse_tc_vlan_action(struct mlx5e_priv *priv,
3434 const struct flow_action_entry *act,
3435 struct mlx5_esw_flow_attr *attr,
3438 u8 vlan_idx = attr->total_vlan;
3440 if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
3444 case FLOW_ACTION_VLAN_POP:
3446 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3447 MLX5_FS_VLAN_DEPTH))
3450 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
3452 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3455 case FLOW_ACTION_VLAN_PUSH:
3456 attr->vlan_vid[vlan_idx] = act->vlan.vid;
3457 attr->vlan_prio[vlan_idx] = act->vlan.prio;
3458 attr->vlan_proto[vlan_idx] = act->vlan.proto;
3459 if (!attr->vlan_proto[vlan_idx])
3460 attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);
3463 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
3464 MLX5_FS_VLAN_DEPTH))
3467 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
3469 if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
3470 (act->vlan.proto != htons(ETH_P_8021Q) ||
3474 *action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
3481 attr->total_vlan = vlan_idx + 1;
3486 static struct net_device *get_fdb_out_dev(struct net_device *uplink_dev,
3487 struct net_device *out_dev)
3489 struct net_device *fdb_out_dev = out_dev;
3490 struct net_device *uplink_upper;
3493 uplink_upper = netdev_master_upper_dev_get_rcu(uplink_dev);
3494 if (uplink_upper && netif_is_lag_master(uplink_upper) &&
3495 uplink_upper == out_dev) {
3496 fdb_out_dev = uplink_dev;
3497 } else if (netif_is_lag_master(out_dev)) {
3498 fdb_out_dev = bond_option_active_slave_get_rcu(netdev_priv(out_dev));
3500 (!mlx5e_eswitch_rep(fdb_out_dev) ||
3501 !netdev_port_same_parent_id(fdb_out_dev, uplink_dev)))
3508 static int add_vlan_push_action(struct mlx5e_priv *priv,
3509 struct mlx5_flow_attr *attr,
3510 struct net_device **out_dev,
3513 struct net_device *vlan_dev = *out_dev;
3514 struct flow_action_entry vlan_act = {
3515 .id = FLOW_ACTION_VLAN_PUSH,
3516 .vlan.vid = vlan_dev_vlan_id(vlan_dev),
3517 .vlan.proto = vlan_dev_vlan_proto(vlan_dev),
3522 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3526 *out_dev = dev_get_by_index_rcu(dev_net(vlan_dev),
3527 dev_get_iflink(vlan_dev));
3528 if (is_vlan_dev(*out_dev))
3529 err = add_vlan_push_action(priv, attr, out_dev, action);
3534 static int add_vlan_pop_action(struct mlx5e_priv *priv,
3535 struct mlx5_flow_attr *attr,
3538 struct flow_action_entry vlan_act = {
3539 .id = FLOW_ACTION_VLAN_POP,
3541 int nest_level, err = 0;
3543 nest_level = attr->parse_attr->filter_dev->lower_level -
3544 priv->netdev->lower_level;
3545 while (nest_level--) {
3546 err = parse_tc_vlan_action(priv, &vlan_act, attr->esw_attr, action);
3554 static bool same_hw_reps(struct mlx5e_priv *priv,
3555 struct net_device *peer_netdev)
3557 struct mlx5e_priv *peer_priv;
3559 peer_priv = netdev_priv(peer_netdev);
3561 return mlx5e_eswitch_rep(priv->netdev) &&
3562 mlx5e_eswitch_rep(peer_netdev) &&
3563 same_hw_devs(priv, peer_priv);
3566 static bool is_lag_dev(struct mlx5e_priv *priv,
3567 struct net_device *peer_netdev)
3569 return ((mlx5_lag_is_sriov(priv->mdev) ||
3570 mlx5_lag_is_multipath(priv->mdev)) &&
3571 same_hw_reps(priv, peer_netdev));
3574 bool mlx5e_is_valid_eswitch_fwd_dev(struct mlx5e_priv *priv,
3575 struct net_device *out_dev)
3577 if (is_merged_eswitch_vfs(priv, out_dev))
3580 if (is_lag_dev(priv, out_dev))
3583 return mlx5e_eswitch_rep(out_dev) &&
3584 same_port_devs(priv, netdev_priv(out_dev));
3587 static bool is_duplicated_output_device(struct net_device *dev,
3588 struct net_device *out_dev,
3589 int *ifindexes, int if_count,
3590 struct netlink_ext_ack *extack)
3594 for (i = 0; i < if_count; i++) {
3595 if (ifindexes[i] == out_dev->ifindex) {
3596 NL_SET_ERR_MSG_MOD(extack,
3597 "can't duplicate output to same device");
3598 netdev_err(dev, "can't duplicate output to same device: %s\n",
3607 static int verify_uplink_forwarding(struct mlx5e_priv *priv,
3608 struct mlx5e_tc_flow *flow,
3609 struct net_device *out_dev,
3610 struct netlink_ext_ack *extack)
3612 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
3613 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3614 struct mlx5e_rep_priv *rep_priv;
3616 /* Forwarding non encapsulated traffic between
3617 * uplink ports is allowed only if
3618 * termination_table_raw_traffic cap is set.
3620 * Input vport was stored attr->in_rep.
3621 * In LAG case, *priv* is the private data of
3622 * uplink which may be not the input vport.
3624 rep_priv = mlx5e_rep_to_rep_priv(attr->in_rep);
3626 if (!(mlx5e_eswitch_uplink_rep(rep_priv->netdev) &&
3627 mlx5e_eswitch_uplink_rep(out_dev)))
3630 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev,
3631 termination_table_raw_traffic)) {
3632 NL_SET_ERR_MSG_MOD(extack,
3633 "devices are both uplink, can't offload forwarding");
3634 pr_err("devices %s %s are both uplink, can't offload forwarding\n",
3635 priv->netdev->name, out_dev->name);
3637 } else if (out_dev != rep_priv->netdev) {
3638 NL_SET_ERR_MSG_MOD(extack,
3639 "devices are not the same uplink, can't offload forwarding");
3640 pr_err("devices %s %s are both uplink but not the same, can't offload forwarding\n",
3641 priv->netdev->name, out_dev->name);
3647 static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
3648 struct flow_action *flow_action,
3649 struct mlx5e_tc_flow *flow,
3650 struct netlink_ext_ack *extack,
3651 struct net_device *filter_dev)
3653 struct pedit_headers_action hdrs[2] = {};
3654 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3655 struct mlx5e_tc_flow_parse_attr *parse_attr;
3656 struct mlx5e_rep_priv *rpriv = priv->ppriv;
3657 const struct ip_tunnel_info *info = NULL;
3658 struct mlx5_flow_attr *attr = flow->attr;
3659 int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS];
3660 bool ft_flow = mlx5e_is_ft_flow(flow);
3661 const struct flow_action_entry *act;
3662 struct mlx5_esw_flow_attr *esw_attr;
3663 struct mlx5_sample_attr sample = {};
3664 bool encap = false, decap = false;
3665 u32 action = attr->action;
3666 int err, i, if_count = 0;
3667 bool mpls_push = false;
3669 if (!flow_action_has_entries(flow_action))
3672 if (!flow_action_hw_stats_check(flow_action, extack,
3673 FLOW_ACTION_HW_STATS_DELAYED_BIT))
3676 esw_attr = attr->esw_attr;
3677 parse_attr = attr->parse_attr;
3679 flow_action_for_each(i, act, flow_action) {
3681 case FLOW_ACTION_DROP:
3682 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
3683 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3685 case FLOW_ACTION_TRAP:
3686 if (!flow_offload_has_one_action(flow_action)) {
3687 NL_SET_ERR_MSG_MOD(extack,
3688 "action trap is supported as a sole action only");
3691 action |= (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3692 MLX5_FLOW_CONTEXT_ACTION_COUNT);
3693 attr->flags |= MLX5_ESW_ATTR_FLAG_SLOW_PATH;
3695 case FLOW_ACTION_MPLS_PUSH:
3696 if (!MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
3697 reformat_l2_to_l3_tunnel) ||
3698 act->mpls_push.proto != htons(ETH_P_MPLS_UC)) {
3699 NL_SET_ERR_MSG_MOD(extack,
3700 "mpls push is supported only for mpls_uc protocol");
3705 case FLOW_ACTION_MPLS_POP:
3706 /* we only support mpls pop if it is the first action
3707 * and the filter net device is bareudp. Subsequent
3708 * actions can be pedit and the last can be mirred
3712 NL_SET_ERR_MSG_MOD(extack,
3713 "mpls pop supported only as first action");
3716 if (!netif_is_bareudp(filter_dev)) {
3717 NL_SET_ERR_MSG_MOD(extack,
3718 "mpls pop supported only on bareudp devices");
3722 parse_attr->eth.h_proto = act->mpls_pop.proto;
3723 action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
3724 flow_flag_set(flow, L3_TO_L2_DECAP);
3726 case FLOW_ACTION_MANGLE:
3727 case FLOW_ACTION_ADD:
3728 err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
3729 parse_attr, hdrs, flow, extack);
3733 if (!flow_flag_test(flow, L3_TO_L2_DECAP)) {
3734 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3735 esw_attr->split_count = esw_attr->out_count;
3738 case FLOW_ACTION_CSUM:
3739 if (csum_offload_supported(priv, action,
3740 act->csum_flags, extack))
3744 case FLOW_ACTION_REDIRECT:
3745 case FLOW_ACTION_MIRRED: {
3746 struct mlx5e_priv *out_priv;
3747 struct net_device *out_dev;
3751 /* out_dev is NULL when filters with
3752 * non-existing mirred device are replayed to
3758 if (mpls_push && !netif_is_bareudp(out_dev)) {
3759 NL_SET_ERR_MSG_MOD(extack,
3760 "mpls is supported only through a bareudp device");
3764 if (ft_flow && out_dev == priv->netdev) {
3765 /* Ignore forward to self rules generated
3766 * by adding both mlx5 devs to the flow table
3767 * block on a normal nft offload setup.
3772 if (esw_attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
3773 NL_SET_ERR_MSG_MOD(extack,
3774 "can't support more output ports, can't offload forwarding");
3775 netdev_warn(priv->netdev,
3776 "can't support more than %d output ports, can't offload forwarding\n",
3777 esw_attr->out_count);
3781 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
3782 MLX5_FLOW_CONTEXT_ACTION_COUNT;
3784 parse_attr->mirred_ifindex[esw_attr->out_count] =
3786 parse_attr->tun_info[esw_attr->out_count] =
3787 mlx5e_dup_tun_info(info);
3788 if (!parse_attr->tun_info[esw_attr->out_count])
3791 esw_attr->dests[esw_attr->out_count].flags |=
3792 MLX5_ESW_DEST_ENCAP;
3793 esw_attr->out_count++;
3794 /* attr->dests[].rep is resolved when we
3797 } else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
3798 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
3799 struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
3801 if (is_duplicated_output_device(priv->netdev,
3808 ifindexes[if_count] = out_dev->ifindex;
3811 out_dev = get_fdb_out_dev(uplink_dev, out_dev);
3815 if (is_vlan_dev(out_dev)) {
3816 err = add_vlan_push_action(priv, attr,
3823 if (is_vlan_dev(parse_attr->filter_dev)) {
3824 err = add_vlan_pop_action(priv, attr,
3830 err = verify_uplink_forwarding(priv, flow, out_dev, extack);
3834 if (!mlx5e_is_valid_eswitch_fwd_dev(priv, out_dev)) {
3835 NL_SET_ERR_MSG_MOD(extack,
3836 "devices are not on same switch HW, can't offload forwarding");
3840 if (same_vf_reps(priv, out_dev)) {
3841 NL_SET_ERR_MSG_MOD(extack,
3842 "can't forward from a VF to itself");
3846 out_priv = netdev_priv(out_dev);
3847 rpriv = out_priv->ppriv;
3848 esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
3849 esw_attr->dests[esw_attr->out_count].mdev = out_priv->mdev;
3850 esw_attr->out_count++;
3851 } else if (parse_attr->filter_dev != priv->netdev) {
3852 /* All mlx5 devices are called to configure
3853 * high level device filters. Therefore, the
3854 * *attempt* to install a filter on invalid
3855 * eswitch should not trigger an explicit error
3859 NL_SET_ERR_MSG_MOD(extack,
3860 "devices are not on same switch HW, can't offload forwarding");
3861 netdev_warn(priv->netdev,
3862 "devices %s %s not on same switch HW, can't offload forwarding\n",
3869 case FLOW_ACTION_TUNNEL_ENCAP:
3877 case FLOW_ACTION_VLAN_PUSH:
3878 case FLOW_ACTION_VLAN_POP:
3879 if (act->id == FLOW_ACTION_VLAN_PUSH &&
3880 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP)) {
3881 /* Replace vlan pop+push with vlan modify */
3882 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3883 err = add_vlan_rewrite_action(priv,
3884 MLX5_FLOW_NAMESPACE_FDB,
3885 act, parse_attr, hdrs,
3888 err = parse_tc_vlan_action(priv, act, esw_attr, &action);
3893 esw_attr->split_count = esw_attr->out_count;
3895 case FLOW_ACTION_VLAN_MANGLE:
3896 err = add_vlan_rewrite_action(priv,
3897 MLX5_FLOW_NAMESPACE_FDB,
3898 act, parse_attr, hdrs,
3903 esw_attr->split_count = esw_attr->out_count;
3905 case FLOW_ACTION_TUNNEL_DECAP:
3908 case FLOW_ACTION_GOTO:
3909 err = validate_goto_chain(priv, flow, act, action,
3914 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
3915 attr->dest_chain = act->chain_index;
3917 case FLOW_ACTION_CT:
3918 if (flow_flag_test(flow, SAMPLE)) {
3919 NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
3922 err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
3926 flow_flag_set(flow, CT);
3927 esw_attr->split_count = esw_attr->out_count;
3929 case FLOW_ACTION_SAMPLE:
3930 if (flow_flag_test(flow, CT)) {
3931 NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
3934 sample.rate = act->sample.rate;
3935 sample.group_num = act->sample.psample_group->group_num;
3936 if (act->sample.truncate)
3937 sample.trunc_size = act->sample.trunc_size;
3938 flow_flag_set(flow, SAMPLE);
3941 NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
3946 /* always set IP version for indirect table handling */
3947 attr->ip_version = mlx5e_tc_get_ip_version(&parse_attr->spec, true);
3949 if (MLX5_CAP_GEN(esw->dev, prio_tag_required) &&
3950 action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) {
3951 /* For prio tag mode, replace vlan pop with rewrite vlan prio
3954 action &= ~MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
3955 err = add_vlan_prio_tag_rewrite_action(priv, parse_attr, hdrs,
3961 if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
3962 hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
3963 err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
3964 parse_attr, hdrs, &action, extack);
3967 /* in case all pedit actions are skipped, remove the MOD_HDR
3968 * flag. we might have set split_count either by pedit or
3969 * pop/push. if there is no pop/push either, reset it too.
3971 if (parse_attr->mod_hdr_acts.num_actions == 0) {
3972 action &= ~MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
3973 dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
3974 if (!((action & MLX5_FLOW_CONTEXT_ACTION_VLAN_POP) ||
3975 (action & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH)))
3976 esw_attr->split_count = 0;
3980 attr->action = action;
3981 if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
3984 if (attr->dest_chain) {
3986 /* It can be supported if we'll create a mapping for
3987 * the tunnel device only (without tunnel), and set
3988 * this tunnel id with this decap flow.
3990 * On restore (miss), we'll just set this saved tunnel
3994 NL_SET_ERR_MSG(extack,
3995 "Decap with goto isn't supported");
3996 netdev_warn(priv->netdev,
3997 "Decap with goto isn't supported");
4001 attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
4004 if (!(attr->action &
4005 (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) {
4006 NL_SET_ERR_MSG_MOD(extack,
4007 "Rule must have at least one forward/drop action");
4011 if (esw_attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
4012 NL_SET_ERR_MSG_MOD(extack,
4013 "current firmware doesn't support split rule for port mirroring");
4014 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
4018 /* Allocate sample attribute only when there is a sample action and
4019 * no errors after parsing.
4021 if (flow_flag_test(flow, SAMPLE)) {
4022 esw_attr->sample = kzalloc(sizeof(*esw_attr->sample), GFP_KERNEL);
4023 if (!esw_attr->sample)
4025 *esw_attr->sample = sample;
4031 static void get_flags(int flags, unsigned long *flow_flags)
4033 unsigned long __flow_flags = 0;
4035 if (flags & MLX5_TC_FLAG(INGRESS))
4036 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_INGRESS);
4037 if (flags & MLX5_TC_FLAG(EGRESS))
4038 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_EGRESS);
4040 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD))
4041 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4042 if (flags & MLX5_TC_FLAG(NIC_OFFLOAD))
4043 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
4044 if (flags & MLX5_TC_FLAG(FT_OFFLOAD))
4045 __flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_FT);
4047 *flow_flags = __flow_flags;
4050 static const struct rhashtable_params tc_ht_params = {
4051 .head_offset = offsetof(struct mlx5e_tc_flow, node),
4052 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
4053 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
4054 .automatic_shrinking = true,
4057 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv,
4058 unsigned long flags)
4060 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4061 struct mlx5e_rep_priv *uplink_rpriv;
4063 if (flags & MLX5_TC_FLAG(ESW_OFFLOAD)) {
4064 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
4065 return &uplink_rpriv->uplink_priv.tc_ht;
4066 } else /* NIC offload */
4067 return &priv->fs.tc.ht;
4070 static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
4072 struct mlx5_esw_flow_attr *esw_attr = flow->attr->esw_attr;
4073 struct mlx5_flow_attr *attr = flow->attr;
4074 bool is_rep_ingress = esw_attr->in_rep->vport != MLX5_VPORT_UPLINK &&
4075 flow_flag_test(flow, INGRESS);
4076 bool act_is_encap = !!(attr->action &
4077 MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
4078 bool esw_paired = mlx5_devcom_is_paired(esw_attr->in_mdev->priv.devcom,
4079 MLX5_DEVCOM_ESW_OFFLOADS);
4084 if ((mlx5_lag_is_sriov(esw_attr->in_mdev) ||
4085 mlx5_lag_is_multipath(esw_attr->in_mdev)) &&
4086 (is_rep_ingress || act_is_encap))
4092 struct mlx5_flow_attr *
4093 mlx5_alloc_flow_attr(enum mlx5_flow_namespace_type type)
4095 u32 ex_attr_size = (type == MLX5_FLOW_NAMESPACE_FDB) ?
4096 sizeof(struct mlx5_esw_flow_attr) :
4097 sizeof(struct mlx5_nic_flow_attr);
4098 struct mlx5_flow_attr *attr;
4100 return kzalloc(sizeof(*attr) + ex_attr_size, GFP_KERNEL);
4104 mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
4105 struct flow_cls_offload *f, unsigned long flow_flags,
4106 struct mlx5e_tc_flow_parse_attr **__parse_attr,
4107 struct mlx5e_tc_flow **__flow)
4109 struct mlx5e_tc_flow_parse_attr *parse_attr;
4110 struct mlx5_flow_attr *attr;
4111 struct mlx5e_tc_flow *flow;
4115 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
4116 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
4117 if (!parse_attr || !flow)
4120 flow->flags = flow_flags;
4121 flow->cookie = f->cookie;
4124 attr = mlx5_alloc_flow_attr(get_flow_name_space(flow));
4130 for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
4131 INIT_LIST_HEAD(&flow->encaps[out_index].list);
4132 INIT_LIST_HEAD(&flow->hairpin);
4133 INIT_LIST_HEAD(&flow->l3_to_l2_reformat);
4134 refcount_set(&flow->refcnt, 1);
4135 init_completion(&flow->init_done);
4138 *__parse_attr = parse_attr;
4149 mlx5e_flow_attr_init(struct mlx5_flow_attr *attr,
4150 struct mlx5e_tc_flow_parse_attr *parse_attr,
4151 struct flow_cls_offload *f)
4153 attr->parse_attr = parse_attr;
4154 attr->chain = f->common.chain_index;
4155 attr->prio = f->common.prio;
4159 mlx5e_flow_esw_attr_init(struct mlx5_flow_attr *attr,
4160 struct mlx5e_priv *priv,
4161 struct mlx5e_tc_flow_parse_attr *parse_attr,
4162 struct flow_cls_offload *f,
4163 struct mlx5_eswitch_rep *in_rep,
4164 struct mlx5_core_dev *in_mdev)
4166 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
4167 struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr;
4169 mlx5e_flow_attr_init(attr, parse_attr, f);
4171 esw_attr->in_rep = in_rep;
4172 esw_attr->in_mdev = in_mdev;
4174 if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
4175 MLX5_COUNTER_SOURCE_ESWITCH)
4176 esw_attr->counter_dev = in_mdev;
4178 esw_attr->counter_dev = priv->mdev;
4181 static struct mlx5e_tc_flow *
4182 __mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4183 struct flow_cls_offload *f,
4184 unsigned long flow_flags,
4185 struct net_device *filter_dev,
4186 struct mlx5_eswitch_rep *in_rep,
4187 struct mlx5_core_dev *in_mdev)
4189 struct flow_rule *rule = flow_cls_offload_flow_rule(f);
4190 struct netlink_ext_ack *extack = f->common.extack;
4191 struct mlx5e_tc_flow_parse_attr *parse_attr;
4192 struct mlx5e_tc_flow *flow;
4195 flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_ESWITCH);
4196 attr_size = sizeof(struct mlx5_esw_flow_attr);
4197 err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
4198 &parse_attr, &flow);
4202 parse_attr->filter_dev = filter_dev;
4203 mlx5e_flow_esw_attr_init(flow->attr,
4205 f, in_rep, in_mdev);
4207 err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
4212 /* actions validation depends on parsing the ct matches first */
4213 err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
4214 &flow->attr->ct_attr, extack);
4218 err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev);
4222 err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
4223 complete_all(&flow->init_done);
4225 if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
4228 add_unready_flow(flow);
4234 mlx5e_flow_put(priv, flow);
4236 return ERR_PTR(err);
4239 static int mlx5e_tc_add_fdb_peer_flow(struct flow_cls_offload *f,
4240 struct mlx5e_tc_flow *flow,
4241 unsigned long flow_flags)
4243 struct mlx5e_priv *priv = flow->priv, *peer_priv;
4244 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
4245 struct mlx5_esw_flow_attr *attr = flow->attr->esw_attr;
4246 struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
4247 struct mlx5e_tc_flow_parse_attr *parse_attr;
4248 struct mlx5e_rep_priv *peer_urpriv;
4249 struct mlx5e_tc_flow *peer_flow;
4250 struct mlx5_core_dev *in_mdev;
4253 peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4257 peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
4258 peer_priv = netdev_priv(peer_urpriv->netdev);
4260 /* in_mdev is assigned of which the packet originated from.
4261 * So packets redirected to uplink use the same mdev of the
4262 * original flow and packets redirected from uplink use the
4265 if (attr->in_rep->vport == MLX5_VPORT_UPLINK)
4266 in_mdev = peer_priv->mdev;
4268 in_mdev = priv->mdev;
4270 parse_attr = flow->attr->parse_attr;
4271 peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
4272 parse_attr->filter_dev,
4273 attr->in_rep, in_mdev);
4274 if (IS_ERR(peer_flow)) {
4275 err = PTR_ERR(peer_flow);
4279 flow->peer_flow = peer_flow;
4280 flow_flag_set(flow, DUP);
4281 mutex_lock(&esw->offloads.peer_mutex);
4282 list_add_tail(&flow->peer, &esw->offloads.peer_flows);
4283 mutex_unlock(&esw->offloads.peer_mutex);
4286 mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
4291 mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
4292 struct flow_cls_offload *f,
4293 unsigned long flow_flags,
4294 struct net_device *filter_dev,
4295 struct mlx5e_tc_flow **__flow)
4297 struct mlx5e_rep_priv *rpriv = priv->ppriv;
4298 struct mlx5_eswitch_rep *in_rep = rpriv->rep;
4299 struct mlx5_core_dev *in_mdev = priv->mdev;
4300 struct mlx5e_tc_flow *flow;
4303 flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
4306 return PTR_ERR(flow);
4308 if (is_peer_flow_needed(flow)) {
4309 err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
4311 mlx5e_tc_del_fdb_flow(priv, flow);
static int mlx5e_add_nic_flow(struct mlx5e_priv *priv,
			      struct flow_cls_offload *f,
			      unsigned long flow_flags,
			      struct net_device *filter_dev,
			      struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
		if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
			return -EOPNOTSUPP;
	} else if (!tc_can_offload_extack(priv->netdev, f->common.extack)) {
		return -EOPNOTSUPP;
	}

	flow_flags |= BIT(MLX5E_TC_FLOW_FLAG_NIC);
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_attr_init(flow->attr, parse_attr, f);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = mlx5_tc_ct_match_add(get_ct_priv(priv), &parse_attr->spec, f,
				   &flow->attr->ct_attr, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow_flag_set(flow, OFFLOADED);
	*__flow = flow;
	return 0;

err_free:
	flow_flag_set(flow, FAILED);
	dealloc_mod_hdr_actions(&parse_attr->mod_hdr_acts);
	mlx5e_flow_put(priv, flow);
out:
	return err;
}
static int mlx5e_tc_add_flow(struct mlx5e_priv *priv,
			     struct flow_cls_offload *f,
			     unsigned long flags,
			     struct net_device *filter_dev,
			     struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned long flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == MLX5_ESWITCH_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
static bool is_flow_rule_duplicate_allowed(struct net_device *dev,
					   struct mlx5e_rep_priv *rpriv)
{
	/* An offloaded flow rule may be duplicated on a non-uplink representor
	 * that shares a tc block with other slaves of a lag device. rpriv can
	 * be NULL if this function is called from NIC mode.
	 */
	return netif_is_lag_port(dev) && rpriv &&
	       rpriv->rep->vport != MLX5_VPORT_UPLINK;
}
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct flow_cls_offload *f, unsigned long flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5e_tc_flow *flow;
	int err = 0;

	if (!mlx5_esw_hold(priv->mdev))
		return -EAGAIN;

	mlx5_esw_get(priv->mdev);

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		/* Same flow rule offloaded to non-uplink representor
		 * sharing tc block, just return 0.
		 */
		if (is_flow_rule_duplicate_allowed(dev, rpriv) && flow->orig_dev != dev)
			goto rcu_unlock;

		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto rcu_unlock;
	}
rcu_unlock:
	rcu_read_unlock();
	if (flow)
		goto out;

	trace_mlx5e_configure_flower(f);
	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	/* Flow rule offloaded to non-uplink representor sharing tc block,
	 * set the flow's owner dev.
	 */
	if (is_flow_rule_duplicate_allowed(dev, rpriv))
		flow->orig_dev = dev;

	err = rhashtable_lookup_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	mlx5_esw_release(priv->mdev);
	return 0;

err_free:
	mlx5e_flow_put(priv, flow);
out:
	mlx5_esw_put(priv->mdev);
	mlx5_esw_release(priv->mdev);
	return err;
}
static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	bool dir_ingress = !!(flags & MLX5_TC_FLAG(INGRESS));
	bool dir_egress = !!(flags & MLX5_TC_FLAG(EGRESS));

	return flow_flag_test(flow, INGRESS) == dir_ingress &&
	       flow_flag_test(flow, EGRESS) == dir_egress;
}
int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct flow_cls_offload *f, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err;

	rcu_read_lock();
	flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	/* Only delete the flow if it doesn't have the MLX5E_TC_FLOW_DELETED
	 * flag set already, so a concurrent delete cannot race with us.
	 */
	if (flow_flag_test_and_set(flow, DELETED)) {
		err = -EINVAL;
		goto errout;
	}
	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	rcu_read_unlock();

	trace_mlx5e_delete_flower(f);
	mlx5e_flow_put(priv, flow);
	mlx5_esw_put(priv->mdev);
	return 0;

errout:
	rcu_read_unlock();
	return err;
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct flow_cls_offload *f, unsigned long flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;
	int err = 0;

	rcu_read_lock();
	flow = mlx5e_flow_get(rhashtable_lookup(tc_ht, &f->cookie,
						tc_ht_params));
	rcu_read_unlock();
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (!same_flow_direction(flow, flags)) {
		err = -EINVAL;
		goto errout;
	}

	if (mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, CT)) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			goto errout;
		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if (flow_flag_test(flow, DUP) &&
	    flow_flag_test(flow->peer_flow, OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
			  FLOW_ACTION_HW_STATS_DELAYED);
	trace_mlx5e_stats_flower(f);
errout:
	mlx5e_flow_put(priv, flow);
	return err;
}
static int apply_police_params(struct mlx5e_priv *priv, u64 rate,
			       struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch *esw;
	u32 rate_mbps = 0;
	u16 vport_num;
	int err;

	vport_num = rpriv->rep->vport;
	if (vport_num >= MLX5_VPORT_ECPF) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Ingress rate limit is supported only for Eswitch ports connected to VFs");
		return -EOPNOTSUPP;
	}

	esw = priv->mdev->priv.eswitch;
	/* rate is given in bytes/sec.
	 * First convert to bits/sec and then round to the nearest mbit/sec,
	 * where mbit means 10^6 bits.
	 * Moreover, if rate is non-zero we configure a minimum of 1 mbit/sec.
	 */
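	/* For example, 187500 bytes/sec is 1500000 bits/sec, and
	 * (1500000 + 500000) / 10^6 rounds that to 2 mbit/sec; a tiny
	 * non-zero rate such as 10000 bytes/sec divides down to 0 and
	 * is clamped to 1 mbit/sec by the max_t() below.
	 */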
	if (rate) {
		rate = (rate * BITS_PER_BYTE) + 500000;
		do_div(rate, 1000000);
		rate_mbps = max_t(u32, rate, 1);
	}

	err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps);
	if (err)
		NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware");

	return err;
}
static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
					struct flow_action *flow_action,
					struct netlink_ext_ack *extack)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct flow_action_entry *act;
	int err;
	int i;

	if (!flow_action_has_entries(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall called with no action");
		return -EINVAL;
	}

	if (!flow_offload_has_one_action(flow_action)) {
		NL_SET_ERR_MSG_MOD(extack, "matchall policing supports only a single action");
		return -EOPNOTSUPP;
	}

	if (!flow_action_basic_hw_stats_check(flow_action, extack))
		return -EOPNOTSUPP;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			if (act->police.rate_pkt_ps) {
				NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
				return -EOPNOTSUPP;
			}
			err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
			if (err)
				return err;

			rpriv->prev_vf_vport_stats = priv->stats.vf_vport;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "mlx5 supports only police action for matchall");
			return -EOPNOTSUPP;
		}
	}

	return 0;
}
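/* tc matchall offload entry points. Policing on matchall is implemented
 * by programming the vport ingress rate limit (see apply_police_params()
 * above), which is why only a single police action at priority 1 is
 * accepted.
 */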
int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
				struct tc_cls_matchall_offload *ma)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct netlink_ext_ack *extack = ma->common.extack;

	if (!mlx5_esw_qos_enabled(esw)) {
		NL_SET_ERR_MSG_MOD(extack, "QoS is not supported on this device");
		return -EOPNOTSUPP;
	}

	if (ma->common.prio != 1) {
		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
		return -EINVAL;
	}

	return scan_tc_matchall_fdb_actions(priv, &ma->rule->action, extack);
}

int mlx5e_tc_delete_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct netlink_ext_ack *extack = ma->common.extack;

	return apply_police_params(priv, 0, extack);
}
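/* Matchall stats come from the VF vport counters: report the rx
 * bytes/packets delta since the previous readout and refresh the
 * snapshot kept in rpriv->prev_vf_vport_stats.
 */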
void mlx5e_tc_stats_matchall(struct mlx5e_priv *priv,
			     struct tc_cls_matchall_offload *ma)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct rtnl_link_stats64 cur_stats;
	u64 dbytes;
	u64 dpkts;

	cur_stats = priv->stats.vf_vport;
	dpkts = cur_stats.rx_packets - rpriv->prev_vf_vport_stats.rx_packets;
	dbytes = cur_stats.rx_bytes - rpriv->prev_vf_vport_stats.rx_bytes;
	rpriv->prev_vf_vport_stats = cur_stats;
	flow_stats_update(&ma->stats, dbytes, dpkts, 0, jiffies,
			  FLOW_ACTION_HW_STATS_DELAYED);
}
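/* A peer mlx5e device is going away: mark hairpin entries paired with it
 * as dead. References are taken under the hairpin table lock, while
 * waiting on each entry's res_ready completion happens outside the lock.
 */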
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe, *tmp;
	LIST_HEAD(init_wait_list);
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	mutex_lock(&priv->fs.tc.hairpin_tbl_lock);
	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist)
		if (refcount_inc_not_zero(&hpe->refcnt))
			list_add(&hpe->dead_peer_wait_list, &init_wait_list);
	mutex_unlock(&priv->fs.tc.hairpin_tbl_lock);

	list_for_each_entry_safe(hpe, tmp, &init_wait_list, dead_peer_wait_list) {
		wait_for_completion(&hpe->res_ready);
		if (!IS_ERR_OR_NULL(hpe->hp) && hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;

		mlx5e_hairpin_put(priv, hpe);
	}
}
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
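/* Size the NIC tc flow table from device caps. For instance, a device
 * reporting max_flow_counter = 2^16 and log_max_ft_size = 17 yields
 * tc_grp_size = min(65536, 2^18) = 65536 and a table size of
 * min(4 * 65536, 2^17) = 131072 entries.
 */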
static int mlx5e_tc_nic_get_ft_size(struct mlx5_core_dev *dev)
{
	int tc_grp_size, tc_tbl_size;
	u32 max_flow_counter;

	max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
			    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

	tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

	tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
			    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

	return tc_tbl_size;
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mapping_ctx *chains_mapping;
	struct mlx5_chains_attr attr = {};
	int err;

	mlx5e_mod_hdr_tbl_init(&tc->mod_hdr);
	mutex_init(&tc->t_lock);
	mutex_init(&tc->hairpin_tbl_lock);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);

	chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj),
					MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
	if (IS_ERR(chains_mapping)) {
		err = PTR_ERR(chains_mapping);
		goto err_mapping;
	}
	tc->mapping = chains_mapping;

	if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
		attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
			MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
	attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
	attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
	attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
	attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
	attr.mapping = chains_mapping;

	tc->chains = mlx5_chains_create(dev, &attr);
	if (IS_ERR(tc->chains)) {
		err = PTR_ERR(tc->chains);
		goto err_chains;
	}

	tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
				 MLX5_FLOW_NAMESPACE_KERNEL);

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	err = register_netdevice_notifier_dev_net(priv->netdev,
						  &tc->netdevice_nb,
						  &tc->netdevice_nn);
	if (err) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
		goto err_reg;
	}

	return 0;

err_reg:
	mlx5_tc_ct_clean(tc->ct);
	mlx5_chains_destroy(tc->chains);
err_chains:
	mapping_destroy(chains_mapping);
err_mapping:
	rhashtable_destroy(&tc->ht);
	return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier_dev_net(priv->netdev,
						      &tc->netdevice_nb,
						      &tc->netdevice_nn);

	mlx5e_mod_hdr_tbl_destroy(&tc->mod_hdr);
	mutex_destroy(&tc->hairpin_tbl_lock);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_chains_put_table(tc->chains, 0, 1, MLX5E_TC_FT_LEVEL);
		tc->t = NULL;
	}
	mutex_destroy(&tc->t_lock);

	mlx5_tc_ct_clean(tc->ct);
	mapping_destroy(tc->mapping);
	mlx5_chains_destroy(tc->chains);
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	const size_t sz_enc_opts = sizeof(struct tunnel_match_enc_opts);
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mapping_ctx *mapping;
	struct mlx5_eswitch *esw;
	struct mlx5e_priv *priv;
	int err = 0;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);
	rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv);
	priv = netdev_priv(rpriv->netdev);
	esw = priv->mdev->priv.eswitch;

	uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev),
					       esw_chains(esw),
					       &esw->offloads.mod_hdr,
					       MLX5_FLOW_NAMESPACE_FDB);

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev));
#endif

	mapping = mapping_create(sizeof(struct tunnel_match_key),
				 TUNNEL_INFO_BITS_MASK, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_tun_mapping;
	}
	uplink_priv->tunnel_mapping = mapping;

	/* 0xFFF is reserved for the stack-devices slow-path table mark */
	mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true);
	if (IS_ERR(mapping)) {
		err = PTR_ERR(mapping);
		goto err_enc_opts_mapping;
	}
	uplink_priv->tunnel_enc_opts_mapping = mapping;

	err = rhashtable_init(tc_ht, &tc_ht_params);
	if (err)
		goto err_ht_init;

	lockdep_set_class(&tc_ht->mutex, &tc_ht_lock_key);

	uplink_priv->encap = mlx5e_tc_tun_init(priv);
	if (IS_ERR(uplink_priv->encap)) {
		err = PTR_ERR(uplink_priv->encap);
		goto err_register_fib_notifier;
	}

	return 0;

err_register_fib_notifier:
	rhashtable_destroy(tc_ht);
err_ht_init:
	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
err_enc_opts_mapping:
	mapping_destroy(uplink_priv->tunnel_mapping);
err_tun_mapping:
#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
#endif
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
	netdev_warn(priv->netdev,
		    "Failed to initialize tc (eswitch), err: %d", err);
	return err;
}
void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	struct mlx5_rep_uplink_priv *uplink_priv;

	uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht);

	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
	mlx5e_tc_tun_cleanup(uplink_priv->encap);

	mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
	mapping_destroy(uplink_priv->tunnel_mapping);

#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
	mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
#endif
	mlx5_tc_ct_clean(uplink_priv->ct_priv);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
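/* Retry flows that were left unready at insertion time (e.g. a multipath
 * route was unreachable when __mlx5e_add_fdb_flow() ran); flows that now
 * offload successfully are removed from the unready list.
 */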
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	mutex_lock(&rpriv->unready_flows_lock);
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			unready_flow_del(flow);
	}
	mutex_unlock(&rpriv->unready_flows_lock);
}
static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
				     struct flow_cls_offload *cls_flower,
				     unsigned long flags)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return mlx5e_configure_flower(priv->netdev, priv, cls_flower,
					      flags);
	case FLOW_CLS_DESTROY:
		return mlx5e_delete_flower(priv->netdev, priv, cls_flower,
					   flags);
	case FLOW_CLS_STATS:
		return mlx5e_stats_flower(priv->netdev, priv, cls_flower,
					  flags);
	default:
		return -EOPNOTSUPP;
	}
}
int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
			    void *cb_priv)
{
	unsigned long flags = MLX5_TC_FLAG(INGRESS);
	struct mlx5e_priv *priv = cb_priv;

	if (!priv->netdev || !netif_device_present(priv->netdev))
		return -EOPNOTSUPP;

	if (mlx5e_is_uplink_rep(priv))
		flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
	else
		flags |= MLX5_TC_FLAG(NIC_OFFLOAD);

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
	default:
		return -EOPNOTSUPP;
	}
}
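/* Restore the tc chain (and ct zone, if any) for a packet that missed in
 * hardware. In NIC mode both are packed into reg_b (see NIC_CHAIN_TO_REG
 * and NIC_ZONE_RESTORE_TO_REG above): the low bits covered by
 * MLX5E_TC_TABLE_CHAIN_TAG_MASK hold the chain mapping tag, and the bits
 * at REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG) hold the zone restore id.
 */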
bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
			 struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
	u32 chain = 0, chain_tag, reg_b, zone_restore_id;
	struct mlx5e_priv *priv = netdev_priv(skb->dev);
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	struct mlx5_mapped_obj mapped_obj;
	struct tc_skb_ext *tc_skb_ext;
	int err;

	reg_b = be32_to_cpu(cqe->ft_metadata);

	chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;

	err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
	if (err) {
		netdev_dbg(priv->netdev,
			   "Couldn't find chain for chain tag: %d, err: %d\n",
			   chain_tag, err);
		return false;
	}

	if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
		chain = mapped_obj.chain;
		tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
		if (WARN_ON(!tc_skb_ext))
			return false;

		tc_skb_ext->chain = chain;

		zone_restore_id = (reg_b >> REG_MAPPING_SHIFT(NIC_ZONE_RESTORE_TO_REG)) &
				  ESW_ZONE_ID_MASK;

		if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
					      zone_restore_id))
			return false;
	} else {
		netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
		return false;
	}
#endif /* CONFIG_NET_TC_SKB_EXT */

	return true;
}