/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "lib/vxlan.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
	MLX5E_TC_FLOW_ESWITCH	= BIT(MLX5E_TC_FLOW_BASE),
	MLX5E_TC_FLOW_NIC	= BIT(MLX5E_TC_FLOW_BASE + 1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE + 2),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 3),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
};

#define MLX5E_TC_MAX_SPLITS 1

struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	struct list_head	encap;   /* flows sharing the same encap ID */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex;
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;
	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return -1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

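/* Note on the two helpers above: cmp_mod_hdr_info() follows memcmp()
 * semantics, so 0 means the two keys carry byte-identical action arrays
 * and an existing firmware modify-header context can be reused; any
 * non-zero value means "different". hash_mod_hdr_info() hashes the same
 * raw bytes, so equal keys always land in the same hash bucket.
 */
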
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;
	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;
	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}

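/* For instance, two TC flows that both rewrite the destination MAC with an
 * identical pedit key end up with byte-identical mod_hdr_actions arrays, so
 * the second mlx5e_attach_mod_hdr() call takes the attach_flow path and both
 * flows share one mod_hdr_id instead of allocating two firmware contexts.
 */
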
static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	/* no more flows referencing this entry, release it */
	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}

static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		return err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);

	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));

		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

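/* For example, with num_channels = 2 the default indirection table simply
 * alternates 0,1,0,1,..., so even table slots point at the pair's first RQ
 * and odd slots at the second; the bit inversion above only reshuffles which
 * slot is consulted when the XOR RSS hash function is in use.
 */
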
static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kvfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}

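/* hash_hairpin_info() packs the peer's vhca id into the high 16 bits and the
 * PCP priority into the low bits, e.g. peer_vhca_id 0x12 with prio 3 yields
 * key 0x00120003, so each (function pair, priority) combination gets its own
 * hairpin entry.
 */
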
#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

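/* Examples of the resulting priority: a flow that matches VLAN PCP 3 with
 * the full 3-bit mask 0x7 maps to match_prio 3; a flow with no VLAN match
 * (or an unmasked priority) maps to UNKNOWN_MATCH_PRIO (8); anything with a
 * partial mask such as 0x4 is rejected with -EOPNOTSUPP.
 */
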
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex;
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}

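/* Sizing example for the above: on a 100Gbs port, link_speed64 = 100000 /
 * 50000 = 2, so the hairpin pair is created with two channels and the RSS
 * (TTC table) path is taken; on a 25Gbs port the max_t() clamp keeps a
 * single channel and the flow steers straight to the hairpin TIR.
 */
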
static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}

static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.has_flow_tag = true,
		.flow_tag = attr->flow_tag,
		.reformat_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err) {
			rule = ERR_PTR(err);
			goto err_add_hairpin_flow;
		}
		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter = counter;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, dest, dest_ix);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return rule;
}

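/* For a hairpin flow that also counts packets, the destination array built
 * above ends up as dest[0] = the peer's TIR (or the hairpin TTC table when
 * RSS is on) and dest[1] = the flow counter, with dest_ix == 2 passed to
 * mlx5_add_flow_rules().
 */
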
static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule[0]);
	mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack);

static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     attr->parse_attr->mirred_ifindex);
		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
					 out_dev, &encap_dev, flow, extack);
		if (err) {
			rule = ERR_PTR(err);
			if (err != -EAGAIN)
				goto err_attach_encap;
		}
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->out_rep[attr->out_count] = rpriv->rep;
		attr->out_mdev[attr->out_count++] = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	/* we get here if (1) there's no error (rule being null) or when
	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
	 */
	if (rule != ERR_PTR(-EAGAIN)) {
		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
		if (IS_ERR(rule))
			goto err_add_rule;

		if (attr->mirror_count) {
			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
			if (IS_ERR(flow->rule[1]))
				goto err_fwd_rule;
		}
	}
	return rule;

err_fwd_rule:
	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
	rule = flow->rule[1];
err_add_rule:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
		mlx5e_detach_encap(priv, flow);
err_attach_encap:
	return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		if (attr->mirror_count)
			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		esw_attr = flow->esw_attr;
		esw_attr->encap_id = e->encap_id;
		flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
		if (IS_ERR(flow->rule[0])) {
			err = PTR_ERR(flow->rule[0]);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		if (esw_attr->mirror_count) {
			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
			if (IS_ERR(flow->rule[1])) {
				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
				err = PTR_ERR(flow->rule[1]);
				mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
					       err);
				continue;
			}
		}

		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			struct mlx5_esw_flow_attr *attr = flow->esw_attr;

			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			if (attr->mirror_count)
				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
	}
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule[0]);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
			parse_vxlan_attr(spec, f);
		} else {
			NL_SET_ERR_MSG_MOD(extack,
					   "port isn't an offloaded vxlan udp dport");
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		NL_SET_ERR_MSG_MOD(extack,
				   "IP tunnel decap offload supported only for vxlan, must set UDP dport");
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*match_level = MLX5_MATCH_NONE;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);

		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		if (mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		/* the HW doesn't support frag first/later */
		if (mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (mask->tos || mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 match_level;
	int err;

	err = __parse_cls_flower(priv, spec, f, &match_level);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		    esw->offloads.inline_mode < match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->match_level = match_level;
	else
		flow->nic_attr->match_level = match_level;

	return err;
}

struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		return -EOPNOTSUPP;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		return -EOPNOTSUPP;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;
}

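/* For example, two pedit keys that both touch bytes 0-3 of the IPv4 header
 * (hdr_type IP4, offset 0) collide on *curr_pmask, so the second
 * set_pedit_val() call fails with -EOPNOTSUPP: the HW can't apply two
 * rewrites to the same location within one modify-header context.
 */
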
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}

static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};

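/* For instance, OFFLOAD(IP_TTL, 1, ip4.ttl, 0) expands to
 * {MLX5_ACTION_IN_FIELD_OUT_IP_TTL, 1, offsetof(struct pedit_headers, ip4.ttl)},
 * i.e. a pedit that writes the TTL byte of the shadow iphdr is translated to
 * the firmware field OUT_IP_TTL. The TCP_FLAGS entry reuses tcp.ack_seq + 5
 * to reach byte 13 of the header, since the flags live in bitfields that
 * offsetof() can't name directly.
 */
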
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
 * max from the SW pedit action. On success, it says how many HW actions were
 * actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last  = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}

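/* Worked example for the contiguity check above: a converted mask of
 * 0x00ffff00 gives first = 8, next_z = 24, last = 23, which passes (one
 * contiguous run, offset 8, length 16), while 0xff00ff00 gives first = 8,
 * next_z = 16, last = 31 and is rejected, since a single HW set/add action
 * can only rewrite one contiguous bit range.
 */
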
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* can get up to crazily 16 HW actions in 32 bits pedit SW key */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			NL_SET_ERR_MSG_MOD(extack,
					   "legacy pedit isn't offloaded");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action,
				   u32 update_flags,
				   struct netlink_ext_ack *extack)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some header/s");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts,
					  struct netlink_ext_ack *extack)
{
	const struct tc_action *a;
	bool modify_ip_header;
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, k, i;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_for_each_action(i, a, exts) {
		if (!is_tcf_pedit(a))
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (k = 0; k < nkeys; k++) {
			htype = tcf_pedit_htype(a, k);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
				modify_ip_header = true;
				break;
			}
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}

static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
		return false;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts,
						     extack);

	return true;
}

static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   extack))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *peer_dev = tcf_mirred_dev(a);

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex = peer_dev->ifindex;
				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}

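/* Encap entries are hashed and compared on the full ip_tunnel_key, so
 * flows with identical tunnel keys share one HW encap context.
 */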
static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

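/* Resolve the route and neighbour for the encap destination. If the
 * egress device is not on the same HW e-switch, the packet goes out
 * through the uplink representor instead.
 */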
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   u8 *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = rt->dst.dev;

	if (!(*out_ttl))
		*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

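/* With the merged-eswitch capability, the vports of both PFs on the same
 * HW device behave as one e-switch, so a peer in switchdev mode on the
 * same HW is a valid forwarding target.
 */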
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		(priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
		same_hw_devs(priv, peer_priv) &&
		MLX5_VPORT_MANAGER(peer_priv->mdev) &&
		(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}

static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   u8 *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
					 fl6);
	if (ret < 0)
		return ret;

	if (!(*out_ttl))
		*out_ttl = ip6_dst_hoplimit(dst);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}

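/* Build the raw encap header the HW prepends to the packet:
 * Ethernet | IPv4 or IPv6 | UDP | VXLAN. Lengths, checksums and the UDP
 * source port are left for the HW to fill in.
 */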
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  u8 tos, u8 ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->tos = tos;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  u8 tos, u8 ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, tos, 0);
	/* the HW fills in the IPv6 payload length */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}

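/* Resolve the route and neighbour for the tunnel destination, build the
 * encap header and, when the neighbour is valid, allocate the packet
 * reformat context in FW. Returns -EAGAIN when the neighbour is still
 * unresolved; the entry is expected to be completed later from the
 * neighbour update path.
 */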
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	u8 nud_state, tos, ttl;
	char *encap_header;
	int err;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	tos = tun_key->tos;
	ttl = tun_key->ttl;

	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, tos, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
					 ipv4_encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	u8 nud_state, tos, ttl;
	char *encap_header;
	int err;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	tos = tun_key->tos;
	ttl = tun_key->ttl;

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to lookup a neigh hash table
	 * entry in the neigh hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * the neigh validity state. So if we get a notification, in case the
	 * neigh changes its validity state, we would find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, tos, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
					 ipv6_encap_size, encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}

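/* Attach an e-switch flow to the encap entry that matches its tunnel key,
 * creating the entry on first use. Returns -EAGAIN when the entry exists
 * but is not yet valid because the neighbour is unresolved.
 */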
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow,
			      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		NL_SET_ERR_MSG_MOD(extack,
				   "must set udp dst port and not set udp src port");
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
	} else {
		NL_SET_ERR_MSG_MOD(extack,
				   "port isn't an offloaded vxlan udp dport");
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}

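/* Map a TC vlan action onto the e-switch VLAN push/pop actions. Up to
 * MLX5_FS_VLAN_DEPTH levels are supported; a second level requires the
 * extended vlan-actions device capability. TCA_VLAN_ACT_MODIFY is not
 * offloaded.
 */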
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct tc_action *a,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
	} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		attr->vlan_vid[vlan_idx] = tcf_vlan_push_vid(a);
		attr->vlan_prio[vlan_idx] = tcf_vlan_push_prio(a);
		attr->vlan_proto[vlan_idx] = tcf_vlan_push_proto(a);
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
			     tcf_vlan_push_prio(a)))
				return -EOPNOTSUPP;

			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
	} else { /* action is TCA_VLAN_ACT_MODIFY */
		return -EOPNOTSUPP;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}

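/* Translate the TC actions of an e-switch (FDB) flow: drop, pedit (+csum),
 * redirect/mirror to other representors, vlan push/pop and tunnel
 * encap/decap. A typical redirect between two representors looks like
 * this (illustrative only; netdev names are placeholders):
 *
 *   tc filter add dev enp4s0f0_0 ingress protocol ip flower skip_sw \
 *           action mirred egress redirect dev enp4s0f0_1
 */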
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->in_rep = rpriv->rep;
	attr->in_mdev = priv->mdev;

	tcf_exts_for_each_action(i, a, exts) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a),
						   extack))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = tcf_mirred_dev(a);

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev) ||
			    is_merged_eswitch_dev(priv, out_dev)) {
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep[attr->out_count] = rpriv->rep;
				attr->out_mdev[attr->out_count++] = out_priv->mdev;
			} else if (encap) {
				parse_attr->mirred_ifindex = out_dev->ifindex;
				parse_attr->tun_info = *info;
				attr->parse_attr = parse_attr;
				action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
				/* attr->out_rep is resolved when we handle encap */
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_vlan(a)) {
			err = parse_tc_vlan_action(priv, a, attr, &action);
			if (err)
				return err;

			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static void get_flags(int flags, u8 *flow_flags)
{
	u8 __flow_flags = 0;

	if (flags & MLX5E_TC_INGRESS)
		__flow_flags |= MLX5E_TC_FLOW_INGRESS;
	if (flags & MLX5E_TC_EGRESS)
		__flow_flags |= MLX5E_TC_FLOW_EGRESS;

	*flow_flags = __flow_flags;
}

static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};

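/* In switchdev (SRIOV_OFFLOADS) mode all offloaded flows are kept in the
 * uplink representor's hash table; otherwise they live in the per-netdev
 * NIC TC table.
 */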
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->tc_ht;
	}

	return &priv->fs.tc.ht;
}

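/* Add a flower classifier: parse the match spec and actions, install the
 * rule into the NIC or FDB tables and track it in the TC hash table keyed
 * by the flower cookie. A flow whose encap neighbour is still unresolved
 * (-EAGAIN) is tracked but not yet marked MLX5E_TC_FLOW_OFFLOADED.
 */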
int mlx5e_configure_flower(struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f, int flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	get_flags(flags, &flow_flags);

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
		return 0;
	}

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags |= MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags |= MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow,
					   extack);
		if (err < 0)
			goto err_free;
		flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow,
						      extack);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow,
					   extack);
		if (err < 0)
			goto err_free;
		flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow,
						      extack);
	}

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		if (err != -EAGAIN)
			goto err_free;
	}

	if (err != -EAGAIN)
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;

	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
	    !(flow->esw_attr->action &
	      MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
		kvfree(parse_attr);

	err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err) {
		mlx5e_tc_del_flow(priv, flow);
		kfree(flow);
	}

	return err;

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}

#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
		return true;

	return false;
}

int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);

	mlx5e_tc_del_flow(priv, flow);

	kfree(flow);

	return 0;
}

int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes, packets, lastuse;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule[0]);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	return 0;
}

static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe;
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
		if (hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;
	}
}

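/* When a netdev on the same HW device is unregistered, mark the hairpin
 * entries that point at it as gone so the pairs are not used any more.
 */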
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}

int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	if (register_netdevice_notifier(&tc->netdevice_nb)) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}

static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}

void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier(&tc->netdevice_nb);

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}

int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	return rhashtable_init(tc_ht, &tc_ht_params);
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);

	return atomic_read(&tc_ht->nelems);
}