/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"

struct mlx5_nic_flow_attr {
        u32 action;
        u32 flow_tag;
        u32 mod_hdr_id;
        u32 hairpin_tirn;
        u8 match_level;
        struct mlx5_flow_table *hairpin_ft;
        struct mlx5_fc *counter;
};

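/* Flow flags: the low bits reuse the MLX5E_TC_* bits exported to callers
 * (ingress/egress, e-switch vs. NIC offload); driver-internal state bits
 * start at MLX5E_TC_FLOW_BASE, right above the last exported bit.
 */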
#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
        MLX5E_TC_FLOW_INGRESS     = MLX5E_TC_INGRESS,
        MLX5E_TC_FLOW_EGRESS      = MLX5E_TC_EGRESS,
        MLX5E_TC_FLOW_ESWITCH     = MLX5E_TC_ESW_OFFLOAD,
        MLX5E_TC_FLOW_NIC         = MLX5E_TC_NIC_OFFLOAD,
        MLX5E_TC_FLOW_OFFLOADED   = BIT(MLX5E_TC_FLOW_BASE),
        MLX5E_TC_FLOW_HAIRPIN     = BIT(MLX5E_TC_FLOW_BASE + 1),
        MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
        MLX5E_TC_FLOW_SLOW        = BIT(MLX5E_TC_FLOW_BASE + 3),
        MLX5E_TC_FLOW_DUP         = BIT(MLX5E_TC_FLOW_BASE + 4),
};

#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
        struct list_head list;
        int index;
};

struct mlx5e_tc_flow {
        struct rhash_head node;
        struct mlx5e_priv *priv;
        u64 cookie;
        u16 flags;
        struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
        /* Flow can be associated with multiple encap IDs.
         * The number of encaps is bounded by the number of supported
         * destinations.
         */
        struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
        struct mlx5e_tc_flow *peer_flow;
        struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
        struct list_head hairpin; /* flows sharing the same hairpin */
        struct list_head peer;    /* flows with peer flow */
        union {
                struct mlx5_esw_flow_attr esw_attr[0];
                struct mlx5_nic_flow_attr nic_attr[0];
        };
};

struct mlx5e_tc_flow_parse_attr {
        struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
        struct net_device *filter_dev;
        struct mlx5_flow_spec spec;
        int num_mod_hdr_actions;
        int max_mod_hdr_actions;
        void *mod_hdr_actions;
        int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
        struct mlx5_hairpin *pair;

        struct mlx5_core_dev *func_mdev;
        struct mlx5e_priv *func_priv;
        u32 tdn;
        u32 tirn;

        int num_channels;
        struct mlx5e_rqt indir_rqt;
        u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
        struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
        /* a node of a hash table which keeps all the hairpin entries */
        struct hlist_node hairpin_hlist;

        /* flows sharing the same hairpin */
        struct list_head flows;

        u16 peer_vhca_id;
        u8 prio;
        struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
        int num_actions;
        void *actions;
};

struct mlx5e_mod_hdr_entry {
        /* a node of a hash table which keeps all the mod_hdr entries */
        struct hlist_node mod_hdr_hlist;

        /* flows sharing the same mod_hdr entry */
        struct list_head flows;

        struct mod_hdr_key key;

        u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
        return jhash(key->actions,
                     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
                                   struct mod_hdr_key *b)
{
        if (a->num_actions != b->num_actions)
                return 1;

        return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

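/* Get-or-create a modify-header context: flows whose pedit actions compile to
 * an identical array of HW actions share one mod_hdr_id. Entries live in a
 * hash table (per e-switch for FDB flows, per NIC TC table otherwise), keyed
 * by a jhash over the packed action array.
 */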
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
                                struct mlx5e_tc_flow *flow,
                                struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        int num_actions, actions_size, namespace, err;
        struct mlx5e_mod_hdr_entry *mh;
        struct mod_hdr_key key;
        bool found = false;
        u32 hash_key;

        num_actions = parse_attr->num_mod_hdr_actions;
        actions_size = MLX5_MH_ACT_SZ * num_actions;

        key.actions = parse_attr->mod_hdr_actions;
        key.num_actions = num_actions;

        hash_key = hash_mod_hdr_info(&key);

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                namespace = MLX5_FLOW_NAMESPACE_FDB;
                hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
                                       mod_hdr_hlist, hash_key) {
                        if (!cmp_mod_hdr_info(&mh->key, &key)) {
                                found = true;
                                break;
                        }
                }
        } else {
                namespace = MLX5_FLOW_NAMESPACE_KERNEL;
                hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
                                       mod_hdr_hlist, hash_key) {
                        if (!cmp_mod_hdr_info(&mh->key, &key)) {
                                found = true;
                                break;
                        }
                }
        }

        if (found)
                goto attach_flow;

        mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
        if (!mh)
                return -ENOMEM;

        mh->key.actions = (void *)mh + sizeof(*mh);
        memcpy(mh->key.actions, key.actions, actions_size);
        mh->key.num_actions = num_actions;
        INIT_LIST_HEAD(&mh->flows);

        err = mlx5_modify_header_alloc(priv->mdev, namespace,
                                       mh->key.num_actions,
                                       mh->key.actions,
                                       &mh->mod_hdr_id);
        if (err)
                goto out_err;

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
        else
                hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
        list_add(&flow->mod_hdr, &mh->flows);
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
        else
                flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

        return 0;

out_err:
        kfree(mh);
        return err;
}

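/* The mod_hdr entry is reference-counted implicitly by its flow list: once
 * the last flow is removed, the remaining list head is the entry's own
 * 'flows' list, so the entry can be freed and the HW context released.
 */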
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
                                 struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->mod_hdr.next;

        list_del(&flow->mod_hdr);

        if (list_empty(next)) {
                struct mlx5e_mod_hdr_entry *mh;

                mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

                mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
                hash_del(&mh->mod_hdr_hlist);
                kfree(mh);
        }
}

static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;

        netdev = __dev_get_by_index(net, ifindex);
        priv = netdev_priv(netdev);
        return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
        u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
        void *tirc;
        int err;

        err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
        if (err)
                return err;

        tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
        MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
        MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

        err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
        if (err)
                goto create_tir_err;

        return 0;

create_tir_err:
        mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

        return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
        mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
        mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

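/* Spread the hairpin RQs over the indirection table. When the RSS hash
 * function is XOR, the table index is bit-inverted first, mirroring what
 * mlx5e does for its own indirection tables.
 */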
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
        u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
        struct mlx5e_priv *priv = hp->func_priv;
        int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

        mlx5e_build_default_indir_rqt(indirection_rqt, sz,
                                      hp->num_channels);

        for (i = 0; i < sz; i++) {
                ix = i;
                if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
                        ix = mlx5e_bits_invert(i, ilog2(sz));
                ix = indirection_rqt[ix];
                rqn = hp->pair->rqn[ix];
                MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
        }
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
        int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
        struct mlx5e_priv *priv = hp->func_priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqtc;
        u32 *in;

        inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

        mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

        err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
        if (!err)
                hp->indir_rqt.enabled = true;

        kvfree(in);
        return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
        struct mlx5e_priv *priv = hp->func_priv;
        u32 in[MLX5_ST_SZ_DW(create_tir_in)];
        int tt, i, err;
        void *tirc;

        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
                struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

                memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
                tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

                MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
                MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
                MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
                mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

                err = mlx5_core_create_tir(hp->func_mdev, in,
                                           MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
                if (err) {
                        mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
                        goto err_destroy_tirs;
                }
        }
        return 0;

err_destroy_tirs:
        for (i = 0; i < tt; i++)
                mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
        return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
        int tt;

        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
                                         struct ttc_params *ttc_params)
{
        struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
        int tt;

        memset(ttc_params, 0, sizeof(*ttc_params));

        ttc_params->any_tt_tirn = hp->tirn;

        for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
                ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

        ft_attr->max_fte = MLX5E_NUM_TT;
        ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
        ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
        struct mlx5e_priv *priv = hp->func_priv;
        struct ttc_params ttc_params;
        int err;

        err = mlx5e_hairpin_create_indirect_rqt(hp);
        if (err)
                return err;

        err = mlx5e_hairpin_create_indirect_tirs(hp);
        if (err)
                goto err_create_indirect_tirs;

        mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
        err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
        if (err)
                goto err_create_ttc_table;

        netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
                   hp->num_channels, hp->ttc.ft.t->id);

        return 0;

err_create_ttc_table:
        mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
        mlx5e_destroy_rqt(priv, &hp->indir_rqt);

        return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
        struct mlx5e_priv *priv = hp->func_priv;

        mlx5e_destroy_ttc_table(priv, &hp->ttc);
        mlx5e_hairpin_destroy_indirect_tirs(hp);
        mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
                     int peer_ifindex)
{
        struct mlx5_core_dev *func_mdev, *peer_mdev;
        struct mlx5e_hairpin *hp;
        struct mlx5_hairpin *pair;
        int err;

        hp = kzalloc(sizeof(*hp), GFP_KERNEL);
        if (!hp)
                return ERR_PTR(-ENOMEM);

        func_mdev = priv->mdev;
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

        pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
        if (IS_ERR(pair)) {
                err = PTR_ERR(pair);
                goto create_pair_err;
        }
        hp->pair = pair;
        hp->func_mdev = func_mdev;
        hp->func_priv = priv;
        hp->num_channels = params->num_channels;

        err = mlx5e_hairpin_create_transport(hp);
        if (err)
                goto create_transport_err;

        if (hp->num_channels > 1) {
                err = mlx5e_hairpin_rss_init(hp);
                if (err)
                        goto rss_init_err;
        }

        return hp;

rss_init_err:
        mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
        mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
        kfree(hp);
        return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
        if (hp->num_channels > 1)
                mlx5e_hairpin_rss_cleanup(hp);
        mlx5e_hairpin_destroy_transport(hp);
        mlx5_core_hairpin_destroy(hp->pair);
        kfree(hp);
}

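/* The hairpin hash key packs the peer function's vhca_id into the upper
 * 16 bits and the matched PCP priority into the low bits, so one entry
 * exists per (peer device, priority) pair.
 */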
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
        return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
                                                     u16 peer_vhca_id, u8 prio)
{
        struct mlx5e_hairpin_entry *hpe;
        u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

        hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
                               hairpin_hlist, hash_key) {
                if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
                        return hpe;
        }

        return NULL;
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
                                  struct mlx5_flow_spec *spec, u8 *match_prio,
                                  struct netlink_ext_ack *extack)
{
        void *headers_c, *headers_v;
        u8 prio_val, prio_mask = 0;
        bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "only PCP trust state supported for hairpin");
                return -EOPNOTSUPP;
        }
#endif
        headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
        headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

        vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
        if (vlan_present) {
                prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
                prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
        }

        if (!vlan_present || !prio_mask) {
                prio_val = UNKNOWN_MATCH_PRIO;
        } else if (prio_mask != 0x7) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "masked priority match not supported for hairpin");
                return -EOPNOTSUPP;
        }

        *match_prio = prio_val;
        return 0;
}

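/* Look up (or create) the hairpin pair towards the peer device matching this
 * flow's PCP priority, then point the flow at the hairpin TIR (single
 * channel) or at the hairpin TTC table (RSS over several channels). The
 * channel count scales with the port speed: one per 50Gb/s share of the link.
 */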
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow,
                                  struct mlx5e_tc_flow_parse_attr *parse_attr,
                                  struct netlink_ext_ack *extack)
{
        int peer_ifindex = parse_attr->mirred_ifindex[0];
        struct mlx5_hairpin_params params;
        struct mlx5_core_dev *peer_mdev;
        struct mlx5e_hairpin_entry *hpe;
        struct mlx5e_hairpin *hp;
        u64 link_speed64;
        u32 link_speed;
        u8 match_prio;
        u16 peer_id;
        int err;

        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
        if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
                NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
                return -EOPNOTSUPP;
        }

        peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
        err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
                                     extack);
        if (err)
                return err;

        hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
        if (hpe)
                goto attach_flow;

        hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
        if (!hpe)
                return -ENOMEM;

        INIT_LIST_HEAD(&hpe->flows);
        hpe->peer_vhca_id = peer_id;
        hpe->prio = match_prio;

        params.log_data_size = 15;
        params.log_data_size = min_t(u8, params.log_data_size,
                                     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
        params.log_data_size = max_t(u8, params.log_data_size,
                                     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

        params.log_num_packets = params.log_data_size -
                                 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
        params.log_num_packets = min_t(u8, params.log_num_packets,
                                       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

        params.q_counter = priv->q_counter;
        /* set a hairpin pair per each 50Gb/s share of the link */
        mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
        link_speed = max_t(u32, link_speed, 50000);
        link_speed64 = link_speed;
        do_div(link_speed64, 50000);
        params.num_channels = link_speed64;
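        /* e.g. link_speed = 100000 (100Gb/s) gives 100000 / 50000 = 2
         * hairpin channels, while anything at or below 50Gb/s (clamped by
         * the max_t above) gives a single channel.
         */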

        hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
        if (IS_ERR(hp)) {
                err = PTR_ERR(hp);
                goto create_hairpin_err;
        }

        netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
                   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
                   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

        hpe->hp = hp;
        hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
                 hash_hairpin_info(peer_id, match_prio));

attach_flow:
        if (hpe->hp->num_channels > 1) {
                flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
                flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
        } else {
                flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
        }
        list_add(&flow->hairpin, &hpe->flows);

        return 0;

create_hairpin_err:
        kfree(hpe);
        return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
                                   struct mlx5e_tc_flow *flow)
{
        struct list_head *next = flow->hairpin.next;

        list_del(&flow->hairpin);

        /* no more hairpin flows for us, release the hairpin pair */
        if (list_empty(next)) {
                struct mlx5e_hairpin_entry *hpe;

                hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

                netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
                           hpe->hp->pair->peer_mdev->priv.name);

                mlx5e_hairpin_destroy(hpe->hp);
                hash_del(&hpe->hairpin_hlist);
                kfree(hpe);
        }
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow,
                      struct netlink_ext_ack *extack)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {
                .action = attr->action,
                .flow_tag = attr->flow_tag,
                .flags = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
        };
        struct mlx5_fc *counter = NULL;
        bool table_created = false;
        int err, dest_ix = 0;

        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
                err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
                if (err)
                        goto err_add_hairpin_flow;
                if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
                        dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                        dest[dest_ix].ft = attr->hairpin_ft;
                } else {
                        dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
                        dest[dest_ix].tir_num = attr->hairpin_tirn;
                }
                dest_ix++;
        } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
                dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
                dest[dest_ix].ft = priv->fs.vlan.ft.t;
                dest_ix++;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter)) {
                        err = PTR_ERR(counter);
                        goto err_fc_create;
                }
                dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
                dest[dest_ix].counter_id = mlx5_fc_id(counter);
                dest_ix++;
                attr->counter = counter;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                flow_act.modify_id = attr->mod_hdr_id;
                kfree(parse_attr->mod_hdr_actions);
                if (err)
                        goto err_create_mod_hdr_id;
        }

        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
                int tc_grp_size, tc_tbl_size;
                u32 max_flow_counter;

                max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
                                   MLX5_CAP_GEN(dev, max_flow_counter_15_0);

                tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

                tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
                                    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

                priv->fs.tc.t =
                        mlx5_create_auto_grouped_flow_table(priv->fs.ns,
                                                            MLX5E_TC_PRIO,
                                                            tc_tbl_size,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            MLX5E_TC_FT_LEVEL, 0);
                if (IS_ERR(priv->fs.tc.t)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Failed to create tc offload table");
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
                        err = PTR_ERR(priv->fs.tc.t);
                        goto err_create_ft;
                }

                table_created = true;
        }

        if (attr->match_level != MLX5_MATCH_NONE)
                parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

        flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
                                            &flow_act, dest, dest_ix);

        if (IS_ERR(flow->rule[0])) {
                err = PTR_ERR(flow->rule[0]);
                goto err_add_rule;
        }

        return 0;

err_add_rule:
        if (table_created) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }
err_create_ft:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
        mlx5_fc_destroy(dev, counter);
err_fc_create:
        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
                mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
        return err;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_fc *counter = NULL;

        counter = attr->counter;
        mlx5_del_flow_rules(flow->rule[0]);
        mlx5_fc_destroy(priv->mdev, counter);

        if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
                mlx5_destroy_flow_table(priv->fs.tc.t);
                priv->fs.tc.t = NULL;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);

        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
                mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
                              struct mlx5e_tc_flow *flow,
                              struct netlink_ext_ack *extack,
                              int out_index);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
                           struct mlx5e_tc_flow *flow,
                           struct mlx5_flow_spec *spec,
                           struct mlx5_esw_flow_attr *attr)
{
        struct mlx5_flow_handle *rule;

        rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
        if (IS_ERR(rule))
                return rule;

        if (attr->split_count) {
                flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
                if (IS_ERR(flow->rule[1])) {
                        mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
                        return flow->rule[1];
                }
        }

        flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
        return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
                             struct mlx5e_tc_flow *flow,
                             struct mlx5_esw_flow_attr *attr)
{
        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;

        if (attr->split_count)
                mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

        mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

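/* While an encap destination still has an unresolved neighbour, the flow is
 * offloaded to the slow path FDB chain instead: plain forwarding, no encap
 * and no splits, so packets keep going to software until the neighbour
 * resolves and the flow can be moved to the fast (encap) rule.
 */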
static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
                              struct mlx5e_tc_flow *flow,
                              struct mlx5_flow_spec *spec,
                              struct mlx5_esw_flow_attr *slow_attr)
{
        struct mlx5_flow_handle *rule;

        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->split_count = 0;
        slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

        rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
        if (!IS_ERR(rule))
                flow->flags |= MLX5E_TC_FLOW_SLOW;

        return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
                                  struct mlx5e_tc_flow *flow,
                                  struct mlx5_esw_flow_attr *slow_attr)
{
        memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
        slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        slow_attr->split_count = 0;
        slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
        mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
        flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
                      struct mlx5e_tc_flow *flow,
                      struct netlink_ext_ack *extack)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        u32 max_chain = mlx5_eswitch_get_chain_range(esw);
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        u16 max_prio = mlx5_eswitch_get_prio_range(esw);
        struct net_device *out_dev, *encap_dev = NULL;
        struct mlx5_fc *counter = NULL;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *out_priv;
        int err = 0, encap_err = 0;
        int out_index;

        if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
                NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
                return -EOPNOTSUPP;
        }

        if (attr->chain > max_chain) {
                NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
                err = -EOPNOTSUPP;
                goto err_max_prio_chain;
        }

        if (attr->prio > max_prio) {
                NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
                err = -EOPNOTSUPP;
                goto err_max_prio_chain;
        }

        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
                int mirred_ifindex;

                if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
                        continue;

                mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index];
                out_dev = __dev_get_by_index(dev_net(priv->netdev),
                                             mirred_ifindex);
                err = mlx5e_attach_encap(priv,
                                         &parse_attr->tun_info[out_index],
                                         out_dev, &encap_dev, flow,
                                         extack, out_index);
                if (err && err != -EAGAIN)
                        goto err_attach_encap;
                if (err == -EAGAIN)
                        encap_err = err;
                out_priv = netdev_priv(encap_dev);
                rpriv = out_priv->ppriv;
                attr->dests[out_index].rep = rpriv->rep;
                attr->dests[out_index].mdev = out_priv->mdev;
        }

        err = mlx5_eswitch_add_vlan_action(esw, attr);
        if (err)
                goto err_add_vlan;

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                kfree(parse_attr->mod_hdr_actions);
                if (err)
                        goto err_mod_hdr;
        }

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(attr->counter_dev, true);
                if (IS_ERR(counter)) {
                        err = PTR_ERR(counter);
                        goto err_create_counter;
                }

                attr->counter = counter;
        }

        /* we get here if (1) there's no error or when
         * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
         */
        if (encap_err == -EAGAIN) {
                /* continue with goto slow path rule instead */
                struct mlx5_esw_flow_attr slow_attr;

                flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
        } else {
                flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
        }

        if (IS_ERR(flow->rule[0])) {
                err = PTR_ERR(flow->rule[0]);
                goto err_add_rule;
        }

        return 0;

err_add_rule:
        mlx5_fc_destroy(attr->counter_dev, counter);
err_create_counter:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
        mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
                if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
                        mlx5e_detach_encap(priv, flow, out_index);
err_attach_encap:
err_max_prio_chain:
        return err;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5_esw_flow_attr slow_attr;
        int out_index;

        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                if (flow->flags & MLX5E_TC_FLOW_SLOW)
                        mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
                else
                        mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
        }

        mlx5_eswitch_del_vlan_action(esw, attr);

        for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
                if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
                        mlx5e_detach_encap(priv, flow, out_index);
        kvfree(attr->parse_attr);

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);

        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
                mlx5_fc_destroy(attr->counter_dev, attr->counter);
}

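/* Called when the tunnel neighbour became valid: program the cached encap
 * header into HW, then move every flow on this encap entry whose destinations
 * are all valid from the slow path rule back to the offloaded encap rule.
 */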
void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr slow_attr, *esw_attr;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct encap_flow_item *efi;
        struct mlx5e_tc_flow *flow;
        int err;

        err = mlx5_packet_reformat_alloc(priv->mdev,
                                         e->reformat_type,
                                         e->encap_size, e->encap_header,
                                         MLX5_FLOW_NAMESPACE_FDB,
                                         &e->encap_id);
        if (err) {
                mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
                               err);
                return;
        }
        e->flags |= MLX5_ENCAP_ENTRY_VALID;
        mlx5e_rep_queue_neigh_stats_work(priv);

        list_for_each_entry(efi, &e->flows, list) {
                bool all_flow_encaps_valid = true;
                int i;

                flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
                esw_attr = flow->esw_attr;
                spec = &esw_attr->parse_attr->spec;

                esw_attr->dests[efi->index].encap_id = e->encap_id;
                esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
                /* Flow can be associated with multiple encap entries.
                 * Before offloading the flow verify that all of them have
                 * a valid neighbour.
                 */
                for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
                        if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
                                continue;
                        if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
                                all_flow_encaps_valid = false;
                                break;
                        }
                }
                /* Do not offload flows with unresolved neighbors */
                if (!all_flow_encaps_valid)
                        continue;
                /* update from slow path rule to encap rule */
                rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
                if (IS_ERR(rule)) {
                        err = PTR_ERR(rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                       err);
                        continue;
                }

                mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
                flow->rule[0] = rule;
        }
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
{
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr slow_attr;
        struct mlx5_flow_handle *rule;
        struct mlx5_flow_spec *spec;
        struct encap_flow_item *efi;
        struct mlx5e_tc_flow *flow;
        int err;

        list_for_each_entry(efi, &e->flows, list) {
                flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
                spec = &flow->esw_attr->parse_attr->spec;

                /* update from encap rule to slow path rule */
                rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
                /* mark the flow's encap dest as non-valid */
                flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

                if (IS_ERR(rule)) {
                        err = PTR_ERR(rule);
                        mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
                                       err);
                        continue;
                }

                mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
                flow->rule[0] = rule;
        }

        /* we know that the encap is valid */
        e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
        mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
                return flow->esw_attr->counter;
        else
                return flow->nic_attr->counter;
}

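/* Periodic work: a neighbour counts as 'used' if any offloaded flow that
 * encapsulates towards it saw counter activity since the last report; in
 * that case poke the kernel neighbour entry so it is not garbage collected.
 */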
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
        u64 bytes, packets, lastuse = 0;
        struct mlx5e_tc_flow *flow;
        struct mlx5e_encap_entry *e;
        struct mlx5_fc *counter;
        struct neigh_table *tbl;
        bool neigh_used = false;
        struct neighbour *n;

        if (m_neigh->family == AF_INET)
                tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
        else if (m_neigh->family == AF_INET6)
                tbl = &nd_tbl;
#endif
        else
                return;

        list_for_each_entry(e, &nhe->encap_list, encap_list) {
                struct encap_flow_item *efi;
                if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
                        continue;
                list_for_each_entry(efi, &e->flows, list) {
                        flow = container_of(efi, struct mlx5e_tc_flow,
                                            encaps[efi->index]);
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                                counter = mlx5e_tc_get_counter(flow);
                                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
                                        break;
                                }
                        }
                }
                if (neigh_used)
                        break;
        }

        if (neigh_used) {
                nhe->reported_lastuse = jiffies;

                /* find the relevant neigh according to the cached device and
                 * dst ip pair
                 */
                n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
                if (!n)
                        return;

                neigh_event_send(n, NULL);
                neigh_release(n);
        }
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                               struct mlx5e_tc_flow *flow, int out_index)
{
        struct list_head *next = flow->encaps[out_index].list.next;

        list_del(&flow->encaps[out_index].list);
        if (list_empty(next)) {
                struct mlx5e_encap_entry *e;

                e = list_entry(next, struct mlx5e_encap_entry, flows);
                mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
                        mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

                hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
                kfree(e);
        }
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
        struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

        if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
            !(flow->flags & MLX5E_TC_FLOW_DUP))
                return;

        mutex_lock(&esw->offloads.peer_mutex);
        list_del(&flow->peer);
        mutex_unlock(&esw->offloads.peer_mutex);

        flow->flags &= ~MLX5E_TC_FLOW_DUP;

        mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
        kvfree(flow->peer_flow);
        flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
        struct mlx5_core_dev *dev = flow->priv->mdev;
        struct mlx5_devcom *devcom = dev->priv.devcom;
        struct mlx5_eswitch *peer_esw;

        peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
        if (!peer_esw)
                return;

        __mlx5e_tc_del_fdb_peer_flow(flow);
        mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
                              struct mlx5e_tc_flow *flow)
{
        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                mlx5e_tc_del_fdb_peer_flow(flow);
                mlx5e_tc_del_fdb_flow(priv, flow);
        } else {
                mlx5e_tc_del_nic_flow(priv, flow);
        }
}

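/* Translate the flower tunnel (outer) match into FTE outer-header fields:
 * the tunnel-specific parts are handled by mlx5e_tc_tun_parse(), the outer
 * IPv4/IPv6 addresses and tunnel IP ToS/TTL are set here, and DMAC matching
 * is enforced because flow counters require a match on the DMAC.
 */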
static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f,
                             struct net_device *filter_dev, u8 *match_level)
{
        struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
        struct flow_match_control enc_control;
        int err;

        err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
                                 headers_c, headers_v, match_level);
        if (err) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "failed to parse tunnel attributes");
                return err;
        }

        flow_rule_match_enc_control(rule, &enc_control);

        if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_enc_ipv4_addrs(rule, &match);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(match.mask->src));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         src_ipv4_src_ipv6.ipv4_layout.ipv4,
                         ntohl(match.key->src));

                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(match.mask->dst));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                         dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
                         ntohl(match.key->dst));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
        } else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_enc_ipv6_addrs(rule, &match);
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

                MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
                struct flow_match_ip match;

                flow_rule_match_enc_ip(rule, &match);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
                         match.mask->tos & 0x3);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
                         match.key->tos & 0x3);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
                         match.mask->tos >> 2);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
                         match.key->tos >> 2);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
                         match.mask->ttl);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
                         match.key->ttl);

                if (match.mask->ttl &&
                    !MLX5_CAP_ESW_FLOWTABLE_FDB
                        (priv->mdev,
                         ft_field_support.outer_ipv4_ttl)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Matching on TTL is not supported");
                        return -EOPNOTSUPP;
                }
        }

        /* Enforce DMAC when offloading incoming tunneled flows.
         * Flow counters require a match on the DMAC.
         */
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
        MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
        ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                     dmac_47_16), priv->netdev->dev_addr);

        /* let software handle IP fragments */
        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

        return 0;
}

static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct mlx5_flow_spec *spec,
                              struct tc_cls_flower_offload *f,
                              struct net_device *filter_dev,
                              u8 *match_level, u8 *tunnel_match_level)
{
        struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                       outer_headers);
        void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
        struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
        struct flow_dissector *dissector = rule->match.dissector;
        u16 addr_type = 0;
        u8 ip_proto = 0;

        *match_level = MLX5_MATCH_NONE;

        if (dissector->used_keys &
            ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_BASIC) |
              BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_CVLAN) |
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
              BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
              BIT(FLOW_DISSECTOR_KEY_TCP) |
              BIT(FLOW_DISSECTOR_KEY_IP) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
             flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
             flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
            flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_enc_control(rule, &match);
                switch (match.key->addr_type) {
                case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
                case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
                        if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
                                return -EOPNOTSUPP;
                        break;
                default:
                        return -EOPNOTSUPP;
                }

                /* In decap flow, header pointers should point to the inner
                 * headers, outer header were already set by parse_tunnel_attr
                 */
                headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                         inner_headers);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
                         ntohs(match.mask->n_proto));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
                         ntohs(match.key->n_proto));

                if (match.mask->n_proto)
                        *match_level = MLX5_MATCH_L2;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);
                if (match.mask->vlan_id ||
                    match.mask->vlan_priority ||
                    match.mask->vlan_tpid) {
                        if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
                                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                         svlan_tag, 1);
                                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                         svlan_tag, 1);
                        } else {
                                MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                         cvlan_tag, 1);
                                MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                         cvlan_tag, 1);
                        }

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
                                 match.mask->vlan_id);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
                                 match.key->vlan_id);

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
                                 match.mask->vlan_priority);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
                                 match.key->vlan_priority);

                        *match_level = MLX5_MATCH_L2;
                }
        } else if (*match_level != MLX5_MATCH_NONE) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
                *match_level = MLX5_MATCH_L2;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
                struct flow_match_vlan match;

                flow_rule_match_vlan(rule, &match);
                if (match.mask->vlan_id ||
                    match.mask->vlan_priority ||
                    match.mask->vlan_tpid) {
                        if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
                                MLX5_SET(fte_match_set_misc, misc_c,
                                         outer_second_svlan_tag, 1);
                                MLX5_SET(fte_match_set_misc, misc_v,
                                         outer_second_svlan_tag, 1);
                        } else {
                                MLX5_SET(fte_match_set_misc, misc_c,
                                         outer_second_cvlan_tag, 1);
                                MLX5_SET(fte_match_set_misc, misc_v,
                                         outer_second_cvlan_tag, 1);
                        }

                        MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
                                 match.mask->vlan_id);
                        MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
                                 match.key->vlan_id);
                        MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
                                 match.mask->vlan_priority);
                        MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
                                 match.key->vlan_priority);

                        *match_level = MLX5_MATCH_L2;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;

                flow_rule_match_eth_addrs(rule, &match);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             dmac_47_16),
                                match.mask->dst);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             dmac_47_16),
                                match.key->dst);

                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                             smac_47_16),
                                match.mask->src);
                ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                             smac_47_16),
                                match.key->src);

                if (!is_zero_ether_addr(match.mask->src) ||
                    !is_zero_ether_addr(match.mask->dst))
                        *match_level = MLX5_MATCH_L2;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;

                flow_rule_match_control(rule, &match);
                addr_type = match.key->addr_type;

                /* the HW doesn't support frag first/later */
                if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
                        return -EOPNOTSUPP;

                if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
                                 match.key->flags & FLOW_DIS_IS_FRAGMENT);

                        /* the HW doesn't need L3 inline to match on frag=no */
                        if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
                                *match_level = MLX5_MATCH_L2;
        /* ***  L2 attributes parsing up to here *** */
                        else
                                *match_level = MLX5_MATCH_L3;
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);
                ip_proto = match.key->ip_proto;

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
                         match.mask->ip_proto);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                         match.key->ip_proto);

                if (match.mask->ip_proto)
                        *match_level = MLX5_MATCH_L3;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &match.mask->src, sizeof(match.mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv4_layout.ipv4),
                       &match.key->src, sizeof(match.key->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &match.mask->dst, sizeof(match.mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
                       &match.key->dst, sizeof(match.key->dst));

                if (match.mask->src || match.mask->dst)
                        *match_level = MLX5_MATCH_L3;
        }

        if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &match.mask->src, sizeof(match.mask->src));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    src_ipv4_src_ipv6.ipv6_layout.ipv6),
                       &match.key->src, sizeof(match.key->src));

                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &match.mask->dst, sizeof(match.mask->dst));
                memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                                    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
                       &match.key->dst, sizeof(match.key->dst));

                if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
                    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
                        *match_level = MLX5_MATCH_L3;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
                         match.mask->tos & 0x3);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
                         match.key->tos & 0x3);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
                         match.mask->tos >> 2);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
                         match.key->tos >> 2);

                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
                         match.mask->ttl);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
                         match.key->ttl);

                if (match.mask->ttl &&
                    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
                                                ft_field_support.outer_ipv4_ttl)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Matching on TTL is not supported");
                        return -EOPNOTSUPP;
                }

                if (match.mask->tos || match.mask->ttl)
                        *match_level = MLX5_MATCH_L3;
        }

        /* ***  L3 attributes parsing up to here *** */

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);
                switch (ip_proto) {
                case IPPROTO_TCP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_sport, ntohs(match.mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_sport, ntohs(match.key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 tcp_dport, ntohs(match.mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 tcp_dport, ntohs(match.key->dst));
                        break;

                case IPPROTO_UDP:
                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_sport, ntohs(match.mask->src));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_sport, ntohs(match.key->src));

                        MLX5_SET(fte_match_set_lyr_2_4, headers_c,
                                 udp_dport, ntohs(match.mask->dst));
                        MLX5_SET(fte_match_set_lyr_2_4, headers_v,
                                 udp_dport, ntohs(match.key->dst));
                        break;
                default:
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Only UDP and TCP transports are supported for L4 matching");
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
                }

                if (match.mask->src || match.mask->dst)
                        *match_level = MLX5_MATCH_L4;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_match_tcp match;

                flow_rule_match_tcp(rule, &match);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
                         ntohs(match.mask->flags));
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
                         ntohs(match.key->flags));

                if (match.mask->flags)
                        *match_level = MLX5_MATCH_L4;
        }

        return 0;
}

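/* Wrapper around __parse_cls_flower() that also validates the result against
 * the e-switch inline mode: on non-uplink representors, a flow whose match
 * level exceeds the configured min-inline setting cannot be offloaded.
 */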
static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5e_tc_flow *flow,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f,
                            struct net_device *filter_dev)
{
        struct netlink_ext_ack *extack = f->common.extack;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
        struct mlx5_eswitch_rep *rep;
        int err;

        err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);

        if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
                rep = rpriv->rep;
                if (rep->vport != MLX5_VPORT_UPLINK &&
                    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
                     esw->offloads.inline_mode < match_level)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Flow is not offloaded due to min inline setting");
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    match_level, esw->offloads.inline_mode);
                        return -EOPNOTSUPP;
                }
        }

        if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
                flow->esw_attr->match_level = match_level;
                flow->esw_attr->tunnel_match_level = tunnel_match_level;
        } else {
                flow->nic_attr->match_level = match_level;
        }

        return err;
}

struct pedit_headers {
        struct ethhdr  eth;
        struct iphdr   ip4;
        struct ipv6hdr ip6;
        struct tcphdr  tcp;
        struct udphdr  udp;
};

struct pedit_headers_action {
        struct pedit_headers vals;
        struct pedit_headers masks;
        u32 pedits;
};

static int pedit_header_offsets[] = {
        [FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
        [FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
        [FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
        [FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
        [FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
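/* e.g. pedit_header(&hdrs->masks, FLOW_ACT_MANGLE_HDR_TYPE_IP4) resolves to
 * &hdrs->masks.ip4: each pedit key is accumulated into the per-protocol
 * header image that set_pedit_val() below writes into.
 */
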
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
                         struct pedit_headers_action *hdrs)
{
        u32 *curr_pmask, *curr_pval;

        curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
        curr_pval = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);

        if (*curr_pmask & mask) /* disallow acting twice on the same location */
                goto out_err;

        *curr_pmask |= mask;
        *curr_pval |= (val & mask);

        return 0;

out_err:
        return -EOPNOTSUPP;
}

struct mlx5_fields {
        u8  field;
        u8  size;
        u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
                {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}

static struct mlx5_fields fields[] = {
        OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
        OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
        OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
        OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
        OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

        OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
        OFFLOAD(SIPV4,  4, ip4.saddr, 0),
        OFFLOAD(DIPV4,  4, ip4.daddr, 0),

        OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
        OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
        OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
        OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
        OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
        OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
        OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
        OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
        OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

        OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
        OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
        OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

        OFFLOAD(UDP_SPORT, 2, udp.source, 0),
        OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};

/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed
 * at max from the SW pedit action. On success, attr->num_mod_hdr_actions
 * says how many HW actions were actually parsed.
 */
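/* A rewrite must target one contiguous run of bits per field: e.g. a 32-bit
 * mask of 0x0fff0000 (one run) becomes a single set/add action, while
 * 0x00ff00ff (two runs) fails the first/next_z/last check below.
 */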
static int offload_pedit_fields(struct pedit_headers_action *hdrs,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
                                struct netlink_ext_ack *extack)
{
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
        int i, action_size, nactions, max_actions, first, last, next_z;
        void *s_masks_p, *a_masks_p, *vals_p;
        struct mlx5_fields *f;
        u8 cmd, field_bsize;
        u32 s_mask, a_mask;
        unsigned long mask;
        __be32 mask_be32;
        __be16 mask_be16;
        void *action;

        set_masks = &hdrs[0].masks;
        add_masks = &hdrs[1].masks;
        set_vals = &hdrs[0].vals;
        add_vals = &hdrs[1].vals;

        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
        action = parse_attr->mod_hdr_actions +
                 parse_attr->num_mod_hdr_actions * action_size;

        max_actions = parse_attr->max_mod_hdr_actions;
        nactions = parse_attr->num_mod_hdr_actions;

        for (i = 0; i < ARRAY_SIZE(fields); i++) {
                f = &fields[i];
                /* avoid seeing bits set from previous iterations */
                s_mask = 0;
                a_mask = 0;

                s_masks_p = (void *)set_masks + f->offset;
                a_masks_p = (void *)add_masks + f->offset;

                memcpy(&s_mask, s_masks_p, f->size);
                memcpy(&a_mask, a_masks_p, f->size);

                if (!s_mask && !a_mask) /* nothing to offload here */
                        continue;

                if (s_mask && a_mask) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "can't set and add to the same HW field");
                        printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
                        return -EOPNOTSUPP;
                }

                if (nactions == max_actions) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "too many pedit actions, can't offload");
                        printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
                        return -EOPNOTSUPP;
                }

                if (s_mask) {
                        cmd = MLX5_ACTION_TYPE_SET;
                        mask = s_mask;
                        vals_p = (void *)set_vals + f->offset;
                        /* clear to denote we consumed this field */
                        memset(s_masks_p, 0, f->size);
                } else {
                        cmd = MLX5_ACTION_TYPE_ADD;
                        mask = a_mask;
                        vals_p = (void *)add_vals + f->offset;
                        /* clear to denote we consumed this field */
                        memset(a_masks_p, 0, f->size);
                }

                field_bsize = f->size * BITS_PER_BYTE;

                if (field_bsize == 32) {
                        mask_be32 = *(__be32 *)&mask;
                        mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
                } else if (field_bsize == 16) {
                        mask_be16 = *(__be16 *)&mask;
                        mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
                }

                first = find_first_bit(&mask, field_bsize);
                next_z = find_next_zero_bit(&mask, field_bsize, first);
                last = find_last_bit(&mask, field_bsize);
                if (first < next_z && next_z < last) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "rewrite of few sub-fields isn't supported");
                        printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
                               mask);
                        return -EOPNOTSUPP;
                }

                MLX5_SET(set_action_in, action, action_type, cmd);
                MLX5_SET(set_action_in, action, field, f->field);

                if (cmd == MLX5_ACTION_TYPE_SET) {
                        MLX5_SET(set_action_in, action, offset, first);
                        /* length is num of bits to be written, zero means length of 32 */
                        MLX5_SET(set_action_in, action, length, (last - first + 1));
                }

                if (field_bsize == 32)
                        MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
                else if (field_bsize == 16)
                        MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
                else if (field_bsize == 8)
                        MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

                action += action_size;
                nactions++;
        }

        parse_attr->num_mod_hdr_actions = nactions;
        return 0;
}

static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
                                 struct pedit_headers_action *hdrs,
                                 int namespace,
                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
        int nkeys, action_size, max_actions;

        nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
                hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
        action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

        if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
                max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
        else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
                max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

        /* can get up to crazily 16 HW actions in 32 bits pedit SW key */
        max_actions = min(max_actions, nkeys * 16);

        parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
        if (!parse_attr->mod_hdr_actions)
                return -ENOMEM;

        parse_attr->max_mod_hdr_actions = max_actions;
        return 0;
}

static const struct pedit_headers zero_masks = {};

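/* Accumulate one pedit mangle/add key into the per-command (set vs. add)
 * header images; the accumulated masks are later compiled into HW
 * modify-header actions by offload_pedit_fields().
 */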
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                                 const struct flow_action_entry *act, int namespace,
                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
                                 struct pedit_headers_action *hdrs,
                                 struct netlink_ext_ack *extack)
{
        u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
        int err = -EOPNOTSUPP;
        u32 mask, val, offset;
        u8 htype;

        htype = act->mangle.htype;
        err = -EOPNOTSUPP; /* can't be all optimistic */

        if (htype == FLOW_ACT_MANGLE_UNSPEC) {
                NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
                goto out_err;
        }

        mask = act->mangle.mask;
        val = act->mangle.val;
        offset = act->mangle.offset;

        err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
        if (err)
                goto out_err;

        hdrs[cmd].pedits++;

        return 0;
out_err:
        return err;
}

static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	int err;
	u8 cmd;

	if (!parse_attr->mod_hdr_actions) {
		err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
		if (err)
			goto out_err;
	}

	err = offload_pedit_fields(hdrs, parse_attr, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

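/* Checksum recalculation is done by the HW only as a side effect of
 * header rewrite, so a TC csum action is accepted only together with
 * pedit, and only for the IPv4/TCP/UDP update flags.
 */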
static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action,
				   u32 update_flags,
				   struct netlink_ext_ack *extack)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some header/s");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions,
					  struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	bool modify_ip_header;
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int i;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		htype = act->mangle.htype;
		if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4 ||
		    htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
			modify_ip_header = true;
			break;
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}

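/* Final sanity check on the parsed action list: egress flows must decap,
 * and any header rewrite must pass the IP protocol check above.
 */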
static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct flow_action *flow_action,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
		return false;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec,
						     flow_action, actions,
						     extack);

	return true;
}

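/* Two mlx5 netdevs are on the same HW iff they report the same NIC
 * system image GUID; this gates hairpin and peer-eswitch offloads.
 */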
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}

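/* Translate the flower action list of a NIC (non-eswitch) rule into
 * flow-context actions: drop, header rewrite, checksum, hairpin
 * forwarding to a device on the same HW, and skb mark.
 */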
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags,
						   extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			}
			break;
		default:
			return -EINVAL;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, extack);
		if (err)
			return err;
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

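/* Forwarding between reps of two PFs is allowed only when both uplinks
 * are merged into a single eswitch (merged_eswitch HCA cap) and the peer
 * is an mlx5 vport manager in switchdev mode on the same HW.
 */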
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		(priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
		same_hw_devs(priv, peer_priv) &&
		MLX5_VPORT_MANAGER(peer_priv->mdev) &&
		(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}

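/* Find or create the encap entry for this tunnel key and attach the flow
 * to it. Creating an entry resolves the route and builds the encap
 * header; -EAGAIN means the neighbour isn't resolved yet, in which case
 * the flow stays on the slow path until the encap becomes valid.
 */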
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack,
			      int out_index)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	uintptr_t hash_key;
	bool found = false;
	int err = 0;

	hash_key = hash_encap_info(key);
	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err;

	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);
	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].encap_id = e->encap_id;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
	} else {
		err = -EAGAIN;
	}

	return err;

out_err:
	kfree(e);
	return err;
}

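/* Map one TC vlan push/pop onto the eswitch vlan action slots. Using the
 * second slot (e.g. a QinQ double push such as "action vlan push id 100
 * protocol 802.1ad") requires MLX5_FS_VLAN_DEPTH capable devices; vlan
 * modify isn't offloaded.
 */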
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct flow_action_entry *act,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
		break;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			     act->vlan.prio))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
		break;
	default:
		/* action is FLOW_ACT_VLAN_MANGLE */
		return -EOPNOTSUPP;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}

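/* Translate the flower action list of an eswitch (FDB) rule: drop,
 * header rewrite, vlan push/pop, tunnel encap/decap, mirror/redirect to
 * other reps, and goto chain.
 */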
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	const struct flow_action_entry *act;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->in_rep = rpriv->rep;
	attr->in_mdev = priv->mdev;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (netdev_port_same_parent_id(priv->netdev,
						       out_dev) ||
			    is_merged_eswitch_dev(priv, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
				struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);

				if (uplink_upper &&
				    netif_is_lag_master(uplink_upper) &&
				    uplink_upper == out_dev)
					out_dev = uplink_dev;

				if (!mlx5e_eswitch_rep(out_dev))
					return -EOPNOTSUPP;

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (encap) {
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = *info;
				encap = false;
				attr->parse_attr = parse_attr;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			}
			break;
		case FLOW_ACTION_TUNNEL_ENCAP:
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			err = parse_tc_vlan_action(priv, act, attr, &action);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			break;
		case FLOW_ACTION_GOTO: {
			u32 dest_chain = act->chain_index;
			u32 max_chain = mlx5_eswitch_get_chain_range(esw);

			if (dest_chain <= attr->chain) {
				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
				return -EOPNOTSUPP;
			}
			if (dest_chain > max_chain) {
				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
				return -EOPNOTSUPP;
			}
			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = dest_chain;
			break;
			}
		default:
			return -EINVAL;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		/* FDB flow: size the mod hdr buffer from the FDB cap,
		 * not the NIC RX cap
		 */
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, extack);
		if (err)
			return err;
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void get_flags(int flags, u16 *flow_flags)
{
	u16 __flow_flags = 0;

	if (flags & MLX5E_TC_INGRESS)
		__flow_flags |= MLX5E_TC_FLOW_INGRESS;
	if (flags & MLX5E_TC_EGRESS)
		__flow_flags |= MLX5E_TC_FLOW_EGRESS;

	if (flags & MLX5E_TC_ESW_OFFLOAD)
		__flow_flags |= MLX5E_TC_FLOW_ESWITCH;
	if (flags & MLX5E_TC_NIC_OFFLOAD)
		__flow_flags |= MLX5E_TC_FLOW_NIC;

	*flow_flags = __flow_flags;
}

static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5E_TC_ESW_OFFLOAD) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else /* NIC offload */
		return &priv->fs.tc.ht;
}

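/* With VF LAG, a rule added on a VF rep ingress may be hit on either
 * uplink, and encap rules follow routing decisions; such flows need a
 * duplicate on the paired eswitch.
 */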
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
			      flow->flags & MLX5E_TC_FLOW_INGRESS;
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) &&
	       (is_rep_ingress || act_is_encap);
}

static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct tc_cls_flower_offload *f, u16 flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;

	*__flow = flow;
	*__parse_attr = parse_attr;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}

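/* Allocate, parse and offload one FDB flow on behalf of (in_rep,
 * in_mdev); also used to instantiate the duplicate flow on the peer
 * eswitch.
 */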
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct tc_cls_flower_offload *f,
		     u16 flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= MLX5E_TC_FLOW_ESWITCH;
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	flow->esw_attr->parse_attr = parse_attr;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	flow->esw_attr->chain = f->common.chain_index;
	flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
	err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow->esw_attr->in_rep = in_rep;
	flow->esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		flow->esw_attr->counter_dev = in_mdev;
	else
		flow->esw_attr->counter_dev = priv->mdev;

	err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	return flow;

err_free:
	kfree(flow);
	kvfree(parse_attr);
out:
	return ERR_PTR(err);
}

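/* Duplicate an offloaded flow on the paired eswitch and link it to the
 * original, so both are torn down together.
 */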
static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
				      struct mlx5e_tc_flow *flow)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is the device the packet arrived on. Packets
	 * redirected to the uplink therefore keep the mdev of the
	 * original flow, while packets redirected from the uplink use
	 * the peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow->flags |= MLX5E_TC_FLOW_DUP;
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}

static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			goto out;
		}
	}

	*__flow = flow;

	return 0;

out:
	return err;
}

static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= MLX5E_TC_FLOW_NIC;
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	kvfree(parse_attr);
	*__flow = flow;

	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
out:
	return err;
}

static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct tc_cls_flower_offload *f,
		  int flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u16 flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}

int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f, int flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		err = -EEXIST;
		goto out;
	}

	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
out:
	return err;
}

#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	return (flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK);
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

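/* Report stats; when the flow is duplicated on the paired eswitch, fold
 * the peer counter in so traffic is accounted no matter which port was
 * hit.
 */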
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f, int flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5e_tc_get_counter(flow);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
	    (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
		u64 bytes2, packets2, lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

out:
	flow_stats_update(&f->stats, bytes, packets, lastuse);

	return 0;
}

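/* On NETDEV_UNREGISTER of a device on the same HW, mark every hairpin
 * pair towards its vhca id as gone so it is no longer used.
 */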
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe;
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
		if (hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;
	}
}

static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	if (register_netdevice_notifier(&tc->netdevice_nb)) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier(&tc->netdevice_nb);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	return rhashtable_init(tc_ht, &tc_ht_params);
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}

void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}