/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "fs_core.h"
#include "en/port.h"
#include "en/tc_tun.h"
#include "lib/devcom.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
	struct mlx5_fc		*counter;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
	MLX5E_TC_FLOW_ESWITCH	= MLX5E_TC_ESW_OFFLOAD,
	MLX5E_TC_FLOW_NIC	= MLX5E_TC_NIC_OFFLOAD,
	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 1),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2),
	MLX5E_TC_FLOW_SLOW	= BIT(MLX5E_TC_FLOW_BASE + 3),
	MLX5E_TC_FLOW_DUP	= BIT(MLX5E_TC_FLOW_BASE + 4),
	MLX5E_TC_FLOW_NOT_READY	= BIT(MLX5E_TC_FLOW_BASE + 5),
};

#define MLX5E_TC_MAX_SPLITS 1

/* Helper struct for accessing a struct containing list_head array.
 * Containing struct
 *   |- Helper array
 *      [0] Helper item 0
 *          |- list_head item 0
 *          |- index (0)
 *      [1] Helper item 1
 *          |- list_head item 1
 *          |- index (1)
 * To access the containing struct from one of the list_head items:
 * 1. Get the helper item from the list_head item using
 *    container_of(list_head item, helper struct type, list_head field)
 * 2. Get the containing struct from the helper item and its index in the array:
 *    container_of(helper item, containing struct type, helper field[index])
 */
struct encap_flow_item {
	struct list_head list;
	int index;
};

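/* Example (illustrative only): given a list_head known to be embedded in an
 * encap_flow_item, the owning mlx5e_tc_flow is recovered with the two
 * container_of() steps described above:
 *
 *	struct encap_flow_item *efi =
 *		container_of(item, struct encap_flow_item, list);
 *	struct mlx5e_tc_flow *flow =
 *		container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
 *
 * This is exactly the pattern used by mlx5e_tc_encap_flows_add() below.
 */
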
struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	u16			flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	/* Flow can be associated with multiple encap IDs.
	 * The number of encaps is bounded by the number of supported
	 * destinations.
	 */
	struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS];
	struct mlx5e_tc_flow	*peer_flow;
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	struct list_head	peer;    /* flows with peer flow */
	struct list_head	unready; /* flows not ready to be offloaded (e.g due to missing route) */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS];
	struct net_device *filter_dev;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	int max_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS];
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

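/* Example (illustrative only): with MLX5_MH_ACT_SZ == 8, i.e. one 64-bit HW
 * modify-header action, a key describing three actions hashes 24 bytes of
 * action data. Two keys are interchangeable only when cmp_mod_hdr_info()
 * returns 0 - equal action counts and byte-identical action arrays - which
 * is what lets mlx5e_attach_mod_hdr() below share one mod_hdr_id between
 * flows.
 */
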
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}

static
struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt);

		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
	kfree(hp);
}

static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

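/* Example: peer_vhca_id 0x25 and prio 3 hash to (0x25 << 16 | 3) = 0x250003,
 * so hairpin entries are bucketed per (peer device, PCP priority) pair.
 */
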
static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio,
				  struct netlink_ext_ack *extack)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "only PCP trust state supported for hairpin");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		NL_SET_ERR_MSG_MOD(extack,
				   "masked priority match not supported for hairpin");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr,
				  struct netlink_ext_ack *extack)
{
	int peer_ifindex = parse_attr->mirred_ifindex[0];
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
				     extack);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set one hairpin pair per each 50Gbps share of the link speed */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;
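	/* Example (assuming the linkspeed helper reports Mbps): a 100Gbps
	 * port gives link_speed = 100000 and num_channels = 2, while
	 * anything at or below 50Gbps gets a single channel because of the
	 * max_t() clamp above.
	 */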

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}

static int
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.flow_tag = attr->flow_tag,
		.reformat_id = 0,
		.flags    = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
	};
	struct mlx5_fc *counter = NULL;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
		if (err)
			goto err_add_hairpin_flow;

		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter_id = mlx5_fc_id(counter);
		dest_ix++;
		attr->counter = counter;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_create_mod_hdr_id;
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to create tc offload table");
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			err = PTR_ERR(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
					    &flow_act, dest, dest_ix);

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return err;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = attr->counter;
	mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid);

static struct mlx5_flow_handle *
mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
			   struct mlx5e_tc_flow *flow,
			   struct mlx5_flow_spec *spec,
			   struct mlx5_esw_flow_attr *attr)
{
	struct mlx5_flow_handle *rule;

	rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
	if (IS_ERR(rule))
		return rule;

	if (attr->split_count) {
		flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
		if (IS_ERR(flow->rule[1])) {
			mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
			return flow->rule[1];
		}
	}

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	return rule;
}

static void
mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
			     struct mlx5e_tc_flow *flow,
			     struct mlx5_esw_flow_attr *attr)
{
	flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;

	if (attr->split_count)
		mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);

	mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
}

static struct mlx5_flow_handle *
mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
			      struct mlx5e_tc_flow *flow,
			      struct mlx5_flow_spec *spec,
			      struct mlx5_esw_flow_attr *slow_attr)
{
	struct mlx5_flow_handle *rule;

	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;

	rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
	if (!IS_ERR(rule))
		flow->flags |= MLX5E_TC_FLOW_SLOW;

	return rule;
}

static void
mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5_esw_flow_attr *slow_attr)
{
	memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
	slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	slow_attr->split_count = 0;
	slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
	mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
	flow->flags &= ~MLX5E_TC_FLOW_SLOW;
}

static void add_unready_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_rep_uplink_priv *uplink_priv;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5_eswitch *esw;

	esw = flow->priv->mdev->priv.eswitch;
	rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	uplink_priv = &rpriv->uplink_priv;

	flow->flags |= MLX5E_TC_FLOW_NOT_READY;
	list_add_tail(&flow->unready, &uplink_priv->unready_flows);
}

static void remove_unready_flow(struct mlx5e_tc_flow *flow)
{
	list_del(&flow->unready);
	flow->flags &= ~MLX5E_TC_FLOW_NOT_READY;
}

static int
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow *flow,
		      struct netlink_ext_ack *extack)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u32 max_chain = mlx5_eswitch_get_chain_range(esw);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr = attr->parse_attr;
	u16 max_prio = mlx5_eswitch_get_prio_range(esw);
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_fc *counter = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	bool encap_valid = true;
	int err = 0;
	int out_index;

	if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) {
		NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW");
		return -EOPNOTSUPP;
	}

	if (attr->chain > max_chain) {
		NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	if (attr->prio > max_prio) {
		NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
		err = -EOPNOTSUPP;
		goto err_max_prio_chain;
	}

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) {
		int mirred_ifindex;

		if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP))
			continue;

		mirred_ifindex = parse_attr->mirred_ifindex[out_index];
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     mirred_ifindex);
		err = mlx5e_attach_encap(priv, flow, out_dev, out_index,
					 extack, &encap_dev, &encap_valid);
		if (err)
			goto err_attach_encap;

		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->dests[out_index].rep = rpriv->rep;
		attr->dests[out_index].mdev = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err)
		goto err_add_vlan;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err)
			goto err_mod_hdr;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(attr->counter_dev, true);
		if (IS_ERR(counter)) {
			err = PTR_ERR(counter);
			goto err_create_counter;
		}

		attr->counter = counter;
	}

	/* we get here if one of the following takes place:
	 * (1) there's no error
	 * (2) there's an encap action and we don't have valid neigh
	 */
	if (!encap_valid) {
		/* continue with goto slow path rule instead */
		struct mlx5_esw_flow_attr slow_attr;

		flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
	} else {
		flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
	}

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		goto err_add_rule;
	}

	return 0;

err_add_rule:
	mlx5_fc_destroy(attr->counter_dev, counter);
err_create_counter:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
err_attach_encap:
err_max_prio_chain:
	return err;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5_esw_flow_attr slow_attr;
	int out_index;

	if (flow->flags & MLX5E_TC_FLOW_NOT_READY) {
		remove_unready_flow(flow);
		kvfree(attr->parse_attr);
		return;
	}

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		if (flow->flags & MLX5E_TC_FLOW_SLOW)
			mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		else
			mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++)
		if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)
			mlx5e_detach_encap(priv, flow, out_index);
	kvfree(attr->parse_attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
		mlx5_fc_destroy(attr->counter_dev, attr->counter);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr, *esw_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_packet_reformat_alloc(priv->mdev,
					 e->reformat_type,
					 e->encap_size, e->encap_header,
					 MLX5_FLOW_NAMESPACE_FDB,
					 &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(efi, &e->flows, list) {
		bool all_flow_encaps_valid = true;
		int i;

		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		esw_attr = flow->esw_attr;
		spec = &esw_attr->parse_attr->spec;

		esw_attr->dests[efi->index].encap_id = e->encap_id;
		esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		/* Flow can be associated with multiple encap entries.
		 * Before offloading the flow verify that all of them have
		 * a valid neighbour.
		 */
		for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) {
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP))
				continue;
			if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) {
				all_flow_encaps_valid = false;
				break;
			}
		}
		/* Do not offload flows with unresolved neighbors */
		if (!all_flow_encaps_valid)
			continue;
		/* update from slow path rule to encap rule */
		rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
		flow->rule[0] = rule;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr slow_attr;
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;
	struct encap_flow_item *efi;
	struct mlx5e_tc_flow *flow;
	int err;

	list_for_each_entry(efi, &e->flows, list) {
		flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]);
		spec = &flow->esw_attr->parse_attr->spec;

		/* update from encap rule to slow path rule */
		rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
		/* mark the flow's encap dest as non-valid */
		flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID;

		if (IS_ERR(rule)) {
			err = PTR_ERR(rule);
			mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
				       err);
			continue;
		}

		mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
		flow->rule[0] = rule;
	}

	/* we know that the encap is valid */
	e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
	mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
}

static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		return flow->esw_attr->counter;
	else
		return flow->nic_attr->counter;
}

void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		struct encap_flow_item *efi;

		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(efi, &e->flows, list) {
			flow = container_of(efi, struct mlx5e_tc_flow,
					    encaps[efi->index]);
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5e_tc_get_counter(flow);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	nhe->reported_lastuse = jiffies;

	/* find the relevant neigh according to the cached device and
	 * dst ip pair
	 */
	n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
	if (!n)
		return;

	neigh_event_send(n, NULL);
	neigh_release(n);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow, int out_index)
{
	struct list_head *next = flow->encaps[out_index].list.next;

	list_del(&flow->encaps[out_index].list);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch;

	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
	    !(flow->flags & MLX5E_TC_FLOW_DUP))
		return;

	mutex_lock(&esw->offloads.peer_mutex);
	list_del(&flow->peer);
	mutex_unlock(&esw->offloads.peer_mutex);

	flow->flags &= ~MLX5E_TC_FLOW_DUP;

	mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow);
	kvfree(flow->peer_flow);
	flow->peer_flow = NULL;
}

static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow)
{
	struct mlx5_core_dev *dev = flow->priv->mdev;
	struct mlx5_devcom *devcom = dev->priv.devcom;
	struct mlx5_eswitch *peer_esw;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return;

	__mlx5e_tc_del_fdb_peer_flow(flow);
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		mlx5e_tc_del_fdb_peer_flow(flow);
		mlx5e_tc_del_fdb_flow(priv, flow);
	} else {
		mlx5e_tc_del_nic_flow(priv, flow);
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f,
			     struct net_device *filter_dev, u8 *match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_match_control enc_control;
	int err;

	err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f,
				 headers_c, headers_v, match_level);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack,
				   "failed to parse tunnel attributes");
		return err;
	}

	flow_rule_match_enc_control(rule, &enc_control);

	if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_enc_ipv4_addrs(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(match.mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(match.key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(match.mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(match.key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_enc_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IP)) {
		struct flow_match_ip match;

		flow_rule_match_enc_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB
			(priv->mdev,
			 ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      struct net_device *filter_dev,
			      u8 *match_level, u8 *tunnel_match_level)
{
	struct netlink_ext_ack *extack = f->common.extack;
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct flow_dissector *dissector = rule->match.dissector;
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*match_level = MLX5_MATCH_NONE;

	if (dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_enc_control(rule, &match);
		switch (match.key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f, filter_dev, tunnel_match_level))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(match.mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(match.key->n_proto));

		if (match.mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid,
				 match.key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	} else if (*match_level != MLX5_MATCH_NONE) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
		*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_match_vlan match;

		flow_rule_match_vlan(rule, &match);
		if (match.mask->vlan_id ||
		    match.mask->vlan_priority ||
		    match.mask->vlan_tpid) {
			if (match.key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 match.mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 match.key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 match.mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 match.key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_match_eth_addrs match;

		flow_rule_match_eth_addrs(rule, &match);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				match.mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				match.key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				match.mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				match.key->src);

		if (!is_zero_ether_addr(match.mask->src) ||
		    !is_zero_ether_addr(match.mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_match_control match;

		flow_rule_match_control(rule, &match);
		addr_type = match.key->addr_type;

		/* the HW doesn't support frag first/later */
		if (match.mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 match.key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(match.key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_MATCH_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_MATCH_L3;
		}
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_match_basic match;

		flow_rule_match_basic(rule, &match);
		ip_proto = match.key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 match.mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 match.key->ip_proto);

		if (match.mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_match_ipv4_addrs match;

		flow_rule_match_ipv4_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &match.key->src, sizeof(match.key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &match.key->dst, sizeof(match.key->dst));

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_match_ipv6_addrs match;

		flow_rule_match_ipv6_addrs(rule, &match);
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.mask->src, sizeof(match.mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &match.key->src, sizeof(match.key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.mask->dst, sizeof(match.mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &match.key->dst, sizeof(match.key->dst));

		if (ipv6_addr_type(&match.mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&match.mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_match_ip match;

		flow_rule_match_ip(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn,
			 match.mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn,
			 match.key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp,
			 match.mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp,
			 match.key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit,
			 match.mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit,
			 match.key->ttl);

		if (match.mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Matching on TTL is not supported");
			return -EOPNOTSUPP;
		}

		if (match.mask->tos || match.mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_match_ports match;

		flow_rule_match_ports(rule, &match);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(match.key->dst));
			break;
		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(match.mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(match.key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(match.mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(match.key->dst));
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Only UDP and TCP transports are supported for L4 matching");
			netdev_err(priv->netdev,
				   "Only UDP and TCP transports are supported\n");
			return -EINVAL;
		}

		if (match.mask->src || match.mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_match_tcp match;

		flow_rule_match_tcp(rule, &match);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(match.mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(match.key->flags));

		if (match.mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f,
			    struct net_device *filter_dev)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	u8 match_level, tunnel_match_level = MLX5_MATCH_NONE;
	struct mlx5_eswitch_rep *rep;
	int err;

	err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level, &tunnel_match_level);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != MLX5_VPORT_UPLINK &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < match_level)) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Flow is not offloaded due to min inline setting");
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		flow->esw_attr->match_level = match_level;
		flow->esw_attr->tunnel_match_level = tunnel_match_level;
	} else {
		flow->nic_attr->match_level = match_level;
	}

	return err;
}

struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

struct pedit_headers_action {
	struct pedit_headers	vals;
	struct pedit_headers	masks;
	u32			pedits;
};

static int pedit_header_offsets[] = {
	[FLOW_ACT_MANGLE_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[FLOW_ACT_MANGLE_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[FLOW_ACT_MANGLE_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[FLOW_ACT_MANGLE_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

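/* Example: pedit_header(&hdrs->masks, FLOW_ACT_MANGLE_HDR_TYPE_IP4) yields
 * &hdrs->masks.ip4, i.e. the macro maps a mangle header type onto the
 * matching header template inside struct pedit_headers.
 */
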
static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers_action *hdrs)
{
	u32 *curr_pmask, *curr_pval;

	curr_pmask = (u32 *)(pedit_header(&hdrs->masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(&hdrs->vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

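/* Example: two pedit keys that both touch the same 32-bit word of the same
 * header (e.g. two writes covering ip4.ttl) leave *curr_pmask & mask
 * non-zero on the second call, so set_pedit_val() rejects the duplicate
 * rather than silently merging conflicting writes.
 */
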
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}

static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};

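/* Example: OFFLOAD(TCP_SPORT, 2, tcp.source, 0) expands to
 * {MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT, 2, offsetof(struct pedit_headers, tcp.source)},
 * tying the 2-byte TCP source port in the software pedit masks to the
 * firmware field id used in the modify-header command.
 */
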
/* On input attr->max_mod_hdr_actions tells how many HW actions can be parsed at
 * max from the SW pedit action. On success, attr->num_mod_hdr_actions
 * says how many HW actions were actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers_action *hdrs,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &hdrs[0].masks;
	add_masks = &hdrs[1].masks;
	set_vals = &hdrs[0].vals;
	add_vals = &hdrs[1].vals;

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions +
		 parse_attr->num_mod_hdr_actions * action_size;

	max_actions = parse_attr->max_mod_hdr_actions;
	nactions = parse_attr->num_mod_hdr_actions;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			NL_SET_ERR_MSG_MOD(extack,
					   "can't set and add to the same HW field");
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			NL_SET_ERR_MSG_MOD(extack,
					   "too many pedit actions, can't offload");
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last  = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			NL_SET_ERR_MSG_MOD(extack,
					   "rewrite of few sub-fields isn't supported");
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}
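		/* Worked example: in a 16-bit field, mask 0x0ff0 gives
		 * first = 4, next_z = 12, last = 11, so the span is accepted
		 * as contiguous; mask 0x0f0f gives first = 0, next_z = 4,
		 * last = 11, i.e. first < next_z < last, and the rewrite is
		 * rejected above as a non-contiguous sub-field update.
		 */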
		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;

	return 0;
}

static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 struct pedit_headers_action *hdrs,
				 int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits +
		hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits;
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* a single 32-bit pedit SW key can produce up to 16 HW actions */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->max_mod_hdr_actions = max_actions;
	return 0;
}

2056 static const struct pedit_headers zero_masks = {};
static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct flow_action_entry *act, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 struct netlink_ext_ack *extack)
{
	u8 cmd = (act->id == FLOW_ACTION_MANGLE) ? 0 : 1;
	int err = -EOPNOTSUPP; /* can't be all optimistic */
	u32 mask, val, offset;
	u8 htype;

	htype = act->mangle.htype;
	if (htype == FLOW_ACT_MANGLE_UNSPEC) {
		NL_SET_ERR_MSG_MOD(extack, "legacy pedit isn't offloaded");
		goto out_err;
	}

	mask = act->mangle.mask;
	val = act->mangle.val;
	offset = act->mangle.offset;

	err = set_pedit_val(htype, ~mask, val, offset, &hdrs[cmd]);
	if (err)
		goto out_err;

	hdrs[cmd].pedits++;

	return 0;
out_err:
	return err;
}
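/* Convert all accumulated pedit commands into HW modify-header actions.
 * Any mask bits left set after offload_pedit_fields() denote a field we
 * cannot rewrite in HW, so the flow is rejected.
 */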
static int alloc_tc_pedit_action(struct mlx5e_priv *priv, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr,
				 struct pedit_headers_action *hdrs,
				 struct netlink_ext_ack *extack)
{
	struct pedit_headers *cmd_masks;
	int err;
	u8 cmd;

	if (!parse_attr->mod_hdr_actions) {
		err = alloc_mod_hdr_actions(priv, hdrs, namespace, parse_attr);
		if (err)
			goto out_err;
	}

	err = offload_pedit_fields(hdrs, parse_attr, extack);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &hdrs[cmd].masks;
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			NL_SET_ERR_MSG_MOD(extack,
					   "attempt to offload an unsupported field");
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}
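/* HW recalculates checksums only as a side effect of header rewrite, so
 * a standalone csum action (or one covering unsupported headers) cannot
 * be offloaded.
 */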
static bool csum_offload_supported(struct mlx5e_priv *priv,
				   u32 action,
				   u32 update_flags,
				   struct netlink_ext_ack *extack)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/* The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "TC csum action is only offloaded with pedit");
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload TC csum action for some header/s");
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}
struct ip_ttl_word {
	__u8	ttl;
	__u8	protocol;
	__sum16	check;
};

struct ipv6_hoplimit_word {
	__be16	payload_len;
	__u8	nexthdr;
	__u8	hop_limit;
};
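/* Returns true when the pedit action touches IPv4/IPv6 fields other
 * than ttl/hop_limit; such rewrites are then restricted to TCP/UDP/ICMP
 * flows by modify_header_match_supported() below.
 */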
static bool is_action_keys_supported(const struct flow_action_entry *act)
{
	u32 mask, offset;
	u8 htype;

	htype = act->mangle.htype;
	offset = act->mangle.offset;
	mask = ~act->mangle.mask;
	/* For IPv4 & IPv6 headers check the 4-byte word to determine
	 * that the modified fields are NOT ttl & hop_limit only.
	 */
	if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP4) {
		struct ip_ttl_word *ttl_word =
			(struct ip_ttl_word *)&mask;

		if (offset != offsetof(struct iphdr, ttl) ||
		    ttl_word->protocol ||
		    ttl_word->check) {
			return true;
		}
	} else if (htype == FLOW_ACT_MANGLE_HDR_TYPE_IP6) {
		struct ipv6_hoplimit_word *hoplimit_word =
			(struct ipv6_hoplimit_word *)&mask;

		if (offset != offsetof(struct ipv6hdr, payload_len) ||
		    hoplimit_word->payload_len ||
		    hoplimit_word->nexthdr) {
			return true;
		}
	}
	return false;
}
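/* Header rewrite of IP-layer fields is only offloaded for TCP/UDP/ICMP
 * flows; for non-IP ethertypes only MAC rewrites occur, which are safe.
 */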
static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct flow_action *flow_action,
					  u32 actions,
					  struct netlink_ext_ack *extack)
{
	const struct flow_action_entry *act;
	bool modify_ip_header;
	void *headers_v;
	u16 ethertype;
	u8 ip_proto;
	int i;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_DECAP)
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, inner_headers);
	else
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	flow_action_for_each(i, act, flow_action) {
		if (act->id != FLOW_ACTION_MANGLE &&
		    act->id != FLOW_ACTION_ADD)
			continue;

		if (is_action_keys_supported(act)) {
			modify_ip_header = true;
			break;
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "can't offload re-write of non TCP/UDP");
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}
static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct flow_action *flow_action,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow,
				    struct netlink_ext_ack *extack)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
	    !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
		return false;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec,
						     flow_action, actions,
						     extack);

	return true;
}
static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u64 fsystem_guid, psystem_guid;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
	psystem_guid = mlx5_query_nic_system_image_guid(pmdev);

	return (fsystem_guid == psystem_guid);
}
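/* Parse the tc actions of a NIC (non-eswitch) flow into attr->action
 * flags and attributes: drop, pedit/csum, hairpin redirect between
 * ports of the same HW, and skb mark.
 */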
static int parse_tc_nic_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct pedit_headers_action hdrs[2] = {};
	const struct flow_action_entry *act;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags,
						   extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT: {
			struct net_device *peer_dev = act->dev;

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex[0] = peer_dev->ifindex;
				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "device is not on same HW, can't offload");
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			break;
		}
		case FLOW_ACTION_MARK: {
			u32 mark = act->mark;

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				NL_SET_ERR_MSG_MOD(extack,
						   "Bad flow mark - only 16 bit is supported");
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			break;
		}
		default:
			return -EINVAL;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_KERNEL,
					    parse_attr, hdrs, extack);
		if (err)
			return err;
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	return 0;
}
struct encap_key {
	struct ip_tunnel_key *ip_tun_key;
	int tunnel_type;
};

static inline int cmp_encap_info(struct encap_key *a,
				 struct encap_key *b)
{
	return memcmp(a->ip_tun_key, b->ip_tun_key, sizeof(*a->ip_tun_key)) ||
	       a->tunnel_type != b->tunnel_type;
}

static inline int hash_encap_info(struct encap_key *key)
{
	return jhash(key->ip_tun_key, sizeof(*key->ip_tun_key),
		     key->tunnel_type);
}
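/* Two rep netdevs may forward to each other only when the eswitches are
 * merged: the device reports the merged_eswitch cap, both netdevs are
 * eswitch reps, and both PFs report the same system image GUID.
 */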
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		mlx5e_eswitch_rep(priv->netdev) &&
		mlx5e_eswitch_rep(peer_netdev) &&
		same_hw_devs(priv, peer_priv));
}
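/* Find or create the encap entry for the tunnel key of this output.
 * Entries are shared between flows via esw->offloads.encap_tbl; a new
 * entry triggers building the encap header, which may complete later
 * (-EAGAIN) once the route/neighbour is resolved, leaving the entry
 * attached but not yet valid.
 */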
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow,
			      struct net_device *mirred_dev,
			      int out_index,
			      struct netlink_ext_ack *extack,
			      struct net_device **encap_dev,
			      bool *encap_valid)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct ip_tunnel_info *tun_info;
	struct encap_key key, e_key;
	struct mlx5e_encap_entry *e;
	unsigned short family;
	uintptr_t hash_key;
	bool found = false;
	int err = 0;

	parse_attr = attr->parse_attr;
	tun_info = &parse_attr->tun_info[out_index];
	family = ip_tunnel_info_af(tun_info);
	key.ip_tun_key = &tun_info->key;
	key.tunnel_type = mlx5e_tc_tun_get_type(mirred_dev);

	hash_key = hash_encap_info(&key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		e_key.ip_tun_key = &e->tun_info.key;
		e_key.tunnel_type = e->tunnel_type;
		if (!cmp_encap_info(&e_key, &key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack);
	if (err)
		goto out_err;

	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encaps[out_index].list, &e->flows);
	flow->encaps[out_index].index = out_index;
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		attr->dests[out_index].encap_id = e->encap_id;
		attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID;
		*encap_valid = true;
	} else {
		*encap_valid = false;
	}

	return err;

out_err:
	kfree(e);
	return err;
}
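/* Translate a tc vlan push/pop action into eswitch VLAN attributes,
 * supporting up to MLX5_FS_VLAN_DEPTH tags when the device can do so,
 * e.g. (illustrative command only):
 *   tc filter add dev ethX ... action vlan push id 100 protocol 802.1q
 */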
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct flow_action_entry *act,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	u8 vlan_idx = attr->total_vlan;

	if (vlan_idx >= MLX5_FS_VLAN_DEPTH)
		return -EOPNOTSUPP;

	switch (act->id) {
	case FLOW_ACTION_VLAN_POP:
		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2;
		} else {
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
		}
		break;
	case FLOW_ACTION_VLAN_PUSH:
		attr->vlan_vid[vlan_idx] = act->vlan.vid;
		attr->vlan_prio[vlan_idx] = act->vlan.prio;
		attr->vlan_proto[vlan_idx] = act->vlan.proto;
		if (!attr->vlan_proto[vlan_idx])
			attr->vlan_proto[vlan_idx] = htons(ETH_P_8021Q);

		if (vlan_idx) {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev,
								 MLX5_FS_VLAN_DEPTH))
				return -EOPNOTSUPP;
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2;
		} else {
			if (!mlx5_eswitch_vlan_actions_supported(priv->mdev, 1) &&
			    (act->vlan.proto != htons(ETH_P_8021Q) ||
			     act->vlan.prio))
				return -EOPNOTSUPP;
			*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		}
		break;
	default:
		/* action is FLOW_ACTION_VLAN_MANGLE */
		return -EOPNOTSUPP;
	}

	attr->total_vlan = vlan_idx + 1;

	return 0;
}
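/* Parse the tc actions of an eswitch (FDB) flow: drop, header rewrite,
 * redirect/mirror to reps or tunnel devices, VLAN push/pop, tunnel
 * encap/decap and goto-chain, accumulating everything into flow->esw_attr.
 */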
static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
				struct flow_action *flow_action,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow,
				struct netlink_ext_ack *extack)
{
	struct pedit_headers_action hdrs[2] = {};
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	const struct ip_tunnel_info *info = NULL;
	const struct flow_action_entry *act;
	bool encap = false;
	u32 action = 0;
	int err, i;

	if (!flow_action_has_entries(flow_action))
		return -EINVAL;

	attr->in_rep = rpriv->rep;
	attr->in_mdev = priv->mdev;

	flow_action_for_each(i, act, flow_action) {
		switch (act->id) {
		case FLOW_ACTION_DROP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			break;
		case FLOW_ACTION_MANGLE:
		case FLOW_ACTION_ADD:
			err = parse_tc_pedit_action(priv, act, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr, hdrs, extack);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_CSUM:
			if (csum_offload_supported(priv, action,
						   act->csum_flags, extack))
				break;

			return -EOPNOTSUPP;
		case FLOW_ACTION_REDIRECT:
		case FLOW_ACTION_MIRRED: {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = act->dev;
			if (!out_dev) {
				/* out_dev is NULL when filters with
				 * non-existing mirred device are replayed to
				 * the driver.
				 */
				return -EINVAL;
			}

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				NL_SET_ERR_MSG_MOD(extack,
						   "can't support more output ports, can't offload forwarding");
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			if (netdev_port_same_parent_id(priv->netdev,
						       out_dev) ||
			    is_merged_eswitch_dev(priv, out_dev)) {
				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
				struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev);

				if (uplink_upper &&
				    netif_is_lag_master(uplink_upper) &&
				    uplink_upper == out_dev)
					out_dev = uplink_dev;

				if (!mlx5e_eswitch_rep(out_dev))
					return -EOPNOTSUPP;

				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->dests[attr->out_count].rep = rpriv->rep;
				attr->dests[attr->out_count].mdev = out_priv->mdev;
				attr->out_count++;
			} else if (encap) {
				parse_attr->mirred_ifindex[attr->out_count] =
					out_dev->ifindex;
				parse_attr->tun_info[attr->out_count] = *info;
				encap = false;
				attr->parse_attr = parse_attr;
				attr->dests[attr->out_count].flags |=
					MLX5_ESW_DEST_ENCAP;
				attr->out_count++;
				/* attr->dests[].rep is resolved when we
				 * handle encap
				 */
			} else if (parse_attr->filter_dev != priv->netdev) {
				/* All mlx5 devices are called to configure
				 * high level device filters. Therefore, the
				 * *attempt* to install a filter on invalid
				 * eswitch should not trigger an explicit error
				 */
				return -EINVAL;
			} else {
				NL_SET_ERR_MSG_MOD(extack,
						   "devices are not on same switch HW, can't offload forwarding");
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			break;
		}
		case FLOW_ACTION_TUNNEL_ENCAP:
			info = act->tunnel;
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;

			break;
		case FLOW_ACTION_VLAN_PUSH:
		case FLOW_ACTION_VLAN_POP:
			err = parse_tc_vlan_action(priv, act, attr, &action);
			if (err)
				return err;

			attr->split_count = attr->out_count;
			break;
		case FLOW_ACTION_TUNNEL_DECAP:
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			break;
		case FLOW_ACTION_GOTO: {
			u32 dest_chain = act->chain_index;
			u32 max_chain = mlx5_eswitch_get_chain_range(esw);

			if (dest_chain <= attr->chain) {
				NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
				return -EOPNOTSUPP;
			}
			if (dest_chain > max_chain) {
				NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
				return -EOPNOTSUPP;
			}
			action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			attr->dest_chain = dest_chain;
			break;
		}
		default:
			return -EINVAL;
		}
	}

	if (hdrs[TCA_PEDIT_KEY_EX_CMD_SET].pedits ||
	    hdrs[TCA_PEDIT_KEY_EX_CMD_ADD].pedits) {
		err = alloc_tc_pedit_action(priv, MLX5_FLOW_NAMESPACE_FDB,
					    parse_attr, hdrs, extack);
		if (err)
			return err;
	}

	attr->action = action;
	if (!actions_match_supported(priv, flow_action, parse_attr, flow, extack))
		return -EOPNOTSUPP;

	if (attr->dest_chain) {
		if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
			NL_SET_ERR_MSG(extack, "Mirroring goto chain rules isn't supported");
			return -EOPNOTSUPP;
		}
		attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
	}

	if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "current firmware doesn't support split rule for port mirroring");
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static void get_flags(int flags, u16 *flow_flags)
{
	u16 __flow_flags = 0;

	if (flags & MLX5E_TC_INGRESS)
		__flow_flags |= MLX5E_TC_FLOW_INGRESS;
	if (flags & MLX5E_TC_EGRESS)
		__flow_flags |= MLX5E_TC_FLOW_EGRESS;

	if (flags & MLX5E_TC_ESW_OFFLOAD)
		__flow_flags |= MLX5E_TC_FLOW_ESWITCH;
	if (flags & MLX5E_TC_NIC_OFFLOAD)
		__flow_flags |= MLX5E_TC_FLOW_NIC;

	*flow_flags = __flow_flags;
}
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (flags & MLX5E_TC_ESW_OFFLOAD) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->uplink_priv.tc_ht;
	} else { /* NIC offload */
		return &priv->fs.tc.ht;
	}
}
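/* Under VF LAG or multipath, a rule installed on one eswitch may have
 * to be duplicated on the paired eswitch so traffic that arrives on
 * either port is handled identically.
 */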
static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	bool is_rep_ingress = attr->in_rep->vport != MLX5_VPORT_UPLINK &&
			      flow->flags & MLX5E_TC_FLOW_INGRESS;
	bool act_is_encap = !!(attr->action &
			       MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT);
	bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom,
						MLX5_DEVCOM_ESW_OFFLOADS);

	if (!esw_paired)
		return false;

	if ((mlx5_lag_is_sriov(attr->in_mdev) ||
	     mlx5_lag_is_multipath(attr->in_mdev)) &&
	    (is_rep_ingress || act_is_encap))
		return true;

	return false;
}
static int
mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
		 struct tc_cls_flower_offload *f, u16 flow_flags,
		 struct mlx5e_tc_flow_parse_attr **__parse_attr,
		 struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;

	*__flow = flow;
	*__parse_attr = parse_attr;
	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
static void
mlx5e_flow_esw_attr_init(struct mlx5_esw_flow_attr *esw_attr,
			 struct mlx5e_priv *priv,
			 struct mlx5e_tc_flow_parse_attr *parse_attr,
			 struct tc_cls_flower_offload *f,
			 struct mlx5_eswitch_rep *in_rep,
			 struct mlx5_core_dev *in_mdev)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;

	esw_attr->parse_attr = parse_attr;
	esw_attr->chain = f->common.chain_index;
	esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;

	esw_attr->in_rep = in_rep;
	esw_attr->in_mdev = in_mdev;

	if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) ==
	    MLX5_COUNTER_SOURCE_ESWITCH)
		esw_attr->counter_dev = in_mdev;
	else
		esw_attr->counter_dev = priv->mdev;
}
static struct mlx5e_tc_flow *
__mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		     struct tc_cls_flower_offload *f,
		     u16 flow_flags,
		     struct net_device *filter_dev,
		     struct mlx5_eswitch_rep *in_rep,
		     struct mlx5_core_dev *in_mdev)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	flow_flags |= MLX5E_TC_FLOW_ESWITCH;
	attr_size = sizeof(struct mlx5_esw_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		goto out;

	parse_attr->filter_dev = filter_dev;
	mlx5e_flow_esw_attr_init(flow->esw_attr,
				 priv, parse_attr,
				 f, in_rep, in_mdev);

	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev, extack);
	if (err)
		goto err_free;

	err = parse_tc_fdb_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_fdb_flow(priv, flow, extack);
	if (err) {
		if (!(err == -ENETUNREACH && mlx5_lag_is_multipath(in_mdev)))
			goto err_free;

		add_unready_flow(flow);
	}

	return flow;

err_free:
	kfree(flow);
	kvfree(parse_attr);
out:
	return ERR_PTR(err);
}
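/* Duplicate an offloaded flow onto the paired eswitch (see
 * is_peer_flow_needed()) and link the two via flow->peer_flow.
 */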
static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f,
				      struct mlx5e_tc_flow *flow,
				      u16 flow_flags)
{
	struct mlx5e_priv *priv = flow->priv, *peer_priv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw;
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_rep_priv *peer_urpriv;
	struct mlx5e_tc_flow *peer_flow;
	struct mlx5_core_dev *in_mdev;
	int err = 0;

	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		return -ENODEV;

	peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH);
	peer_priv = netdev_priv(peer_urpriv->netdev);

	/* in_mdev is assigned the mdev from which the packet originated,
	 * so packets redirected to the uplink use the same mdev as the
	 * original flow and packets redirected from the uplink use the
	 * peer mdev.
	 */
	if (flow->esw_attr->in_rep->vport == MLX5_VPORT_UPLINK)
		in_mdev = peer_priv->mdev;
	else
		in_mdev = priv->mdev;

	parse_attr = flow->esw_attr->parse_attr;
	peer_flow = __mlx5e_add_fdb_flow(peer_priv, f, flow_flags,
					 parse_attr->filter_dev,
					 flow->esw_attr->in_rep, in_mdev);
	if (IS_ERR(peer_flow)) {
		err = PTR_ERR(peer_flow);
		goto out;
	}

	flow->peer_flow = peer_flow;
	flow->flags |= MLX5E_TC_FLOW_DUP;
	mutex_lock(&esw->offloads.peer_mutex);
	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
	mutex_unlock(&esw->offloads.peer_mutex);

out:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	return err;
}
static int
mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *in_rep = rpriv->rep;
	struct mlx5_core_dev *in_mdev = priv->mdev;
	struct mlx5e_tc_flow *flow;
	int err;

	flow = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep,
				    in_mdev);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (is_peer_flow_needed(flow)) {
		err = mlx5e_tc_add_fdb_peer_flow(f, flow, flow_flags);
		if (err) {
			mlx5e_tc_del_fdb_flow(priv, flow);
			return err;
		}
	}

	*__flow = flow;
	return 0;
}
static int
mlx5e_add_nic_flow(struct mlx5e_priv *priv,
		   struct tc_cls_flower_offload *f,
		   u16 flow_flags,
		   struct net_device *filter_dev,
		   struct mlx5e_tc_flow **__flow)
{
	struct flow_rule *rule = tc_cls_flower_offload_flow_rule(f);
	struct netlink_ext_ack *extack = f->common.extack;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct mlx5e_tc_flow *flow;
	int attr_size, err;

	/* multi-chain not supported for NIC rules */
	if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
		return -EOPNOTSUPP;

	flow_flags |= MLX5E_TC_FLOW_NIC;
	attr_size = sizeof(struct mlx5_nic_flow_attr);
	err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
			       &parse_attr, &flow);
	if (err)
		return err;

	parse_attr->filter_dev = filter_dev;
	err = parse_cls_flower(flow->priv, flow, &parse_attr->spec,
			       f, filter_dev, extack);
	if (err)
		goto err_free;

	err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack);
	if (err)
		goto err_free;

	err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
	if (err)
		goto err_free;

	flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	kvfree(parse_attr);
	*__flow = flow;
	return 0;

err_free:
	kfree(flow);
	kvfree(parse_attr);
	return err;
}
static int
mlx5e_tc_add_flow(struct mlx5e_priv *priv,
		  struct tc_cls_flower_offload *f,
		  int flags,
		  struct net_device *filter_dev,
		  struct mlx5e_tc_flow **flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	u16 flow_flags;
	int err;

	get_flags(flags, &flow_flags);

	if (!tc_can_offload_extack(priv->netdev, f->common.extack))
		return -EOPNOTSUPP;

	if (esw && esw->mode == SRIOV_OFFLOADS)
		err = mlx5e_add_fdb_flow(priv, f, flow_flags,
					 filter_dev, flow);
	else
		err = mlx5e_add_nic_flow(priv, f, flow_flags,
					 filter_dev, flow);

	return err;
}
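/* Entry point for flower classifier offload: reject duplicate cookies,
 * build and offload the flow, then index it by cookie in the tc
 * hashtable for later delete/stats requests.
 */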
int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f, int flags)
{
	struct netlink_ext_ack *extack = f->common.extack;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;
	int err = 0;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		NL_SET_ERR_MSG_MOD(extack,
				   "flow cookie already exists, ignoring");
		netdev_warn_once(priv->netdev,
				 "flow cookie %lx already exists, ignoring\n",
				 f->cookie);
		goto out;
	}

	err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow);
	if (err)
		goto out;

	err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err)
		goto err_free;

	return 0;

err_free:
	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
out:
	return err;
}
#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
		return true;

	return false;
}

int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5e_tc_flow *flow;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);

	return 0;
}
int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f, int flags)
{
	struct mlx5_devcom *devcom = priv->mdev->priv.devcom;
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);
	struct mlx5_eswitch *peer_esw;
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 lastuse = 0;
	u64 packets = 0;
	u64 bytes = 0;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		counter = mlx5e_tc_get_counter(flow);
		if (!counter)
			return 0;

		mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
	}

	/* Under multipath it's possible for one rule to be currently
	 * un-offloaded while the other rule is offloaded.
	 */
	peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
	if (!peer_esw)
		goto out;

	if ((flow->flags & MLX5E_TC_FLOW_DUP) &&
	    (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) {
		u64 bytes2;
		u64 packets2;
		u64 lastuse2;

		counter = mlx5e_tc_get_counter(flow->peer_flow);
		if (!counter)
			goto no_peer_counter;
		mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2);

		bytes += bytes2;
		packets += packets2;
		lastuse = max_t(u64, lastuse, lastuse2);
	}

no_peer_counter:
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
out:
	flow_stats_update(&f->stats, bytes, packets, lastuse);

	return 0;
}
static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
					      struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
	struct mlx5e_hairpin_entry *hpe;
	u16 peer_vhca_id;
	int bkt;

	if (!same_hw_devs(priv, peer_priv))
		return;

	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);

	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
		if (hpe->peer_vhca_id == peer_vhca_id)
			hpe->hp->pair->peer_gone = true;
	}
}
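/* On unregistration of another mlx5e netdev on the same HW, mark the
 * hairpin pairs pointing at it as gone so its queues are no longer used.
 */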
static int mlx5e_tc_netdev_event(struct notifier_block *this,
				 unsigned long event, void *ptr)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	struct mlx5e_flow_steering *fs;
	struct mlx5e_priv *peer_priv;
	struct mlx5e_tc_table *tc;
	struct mlx5e_priv *priv;

	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
	    event != NETDEV_UNREGISTER ||
	    ndev->reg_state == NETREG_REGISTERED)
		return NOTIFY_DONE;

	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
	fs = container_of(tc, struct mlx5e_flow_steering, tc);
	priv = container_of(fs, struct mlx5e_priv, fs);
	peer_priv = netdev_priv(ndev);
	if (priv == peer_priv ||
	    !(priv->netdev->features & NETIF_F_HW_TC))
		return NOTIFY_DONE;

	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);

	return NOTIFY_DONE;
}
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;
	int err;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

	err = rhashtable_init(&tc->ht, &tc_ht_params);
	if (err)
		return err;

	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
	if (register_netdevice_notifier(&tc->netdevice_nb)) {
		tc->netdevice_nb.notifier_call = NULL;
		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
	}

	return err;
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	if (tc->netdevice_nb.notifier_call)
		unregister_netdevice_notifier(&tc->netdevice_nb);

	rhashtable_destroy(&tc->ht);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	return rhashtable_init(tc_ht, &tc_ht_params);
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}

int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv, flags);

	return atomic_read(&tc_ht->nelems);
}
void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw)
{
	struct mlx5e_tc_flow *flow, *tmp;

	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
		__mlx5e_tc_del_fdb_peer_flow(flow);
}
void mlx5e_tc_reoffload_flows_work(struct work_struct *work)
{
	struct mlx5_rep_uplink_priv *rpriv =
		container_of(work, struct mlx5_rep_uplink_priv,
			     reoffload_flows_work);
	struct mlx5e_tc_flow *flow, *tmp;

	rtnl_lock();
	list_for_each_entry_safe(flow, tmp, &rpriv->unready_flows, unready) {
		if (!mlx5e_tc_add_fdb_flow(flow->priv, flow, NULL))
			remove_unready_flow(flow);
	}
	rtnl_unlock();
}