/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/flow_dissector.h>
#include <net/sch_generic.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/device.h>
#include <linux/rhashtable.h>
#include <net/switchdev.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>
#include <net/tc_act/tc_pedit.h>
#include <net/tc_act/tc_csum.h>
#include <net/vxlan.h>
#include <net/arp.h>
#include "en.h"
#include "en_rep.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"
#include "fs_core.h"
#include "en/port.h"

struct mlx5_nic_flow_attr {
	u32 action;
	u32 flow_tag;
	u32 mod_hdr_id;
	u32 hairpin_tirn;
	u8 match_level;
	struct mlx5_flow_table	*hairpin_ft;
};

#define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)

enum {
	MLX5E_TC_FLOW_INGRESS	= MLX5E_TC_INGRESS,
	MLX5E_TC_FLOW_EGRESS	= MLX5E_TC_EGRESS,
	MLX5E_TC_FLOW_ESWITCH	= BIT(MLX5E_TC_FLOW_BASE),
	MLX5E_TC_FLOW_NIC	= BIT(MLX5E_TC_FLOW_BASE + 1),
	MLX5E_TC_FLOW_OFFLOADED	= BIT(MLX5E_TC_FLOW_BASE + 2),
	MLX5E_TC_FLOW_HAIRPIN	= BIT(MLX5E_TC_FLOW_BASE + 3),
	MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
};

#define MLX5E_TC_MAX_SPLITS 1

struct mlx5e_tc_flow {
	struct rhash_head	node;
	struct mlx5e_priv	*priv;
	u64			cookie;
	u8			flags;
	struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
	struct list_head	encap;   /* flows sharing the same encap ID */
	struct list_head	mod_hdr; /* flows sharing the same mod hdr ID */
	struct list_head	hairpin; /* flows sharing the same hairpin */
	union {
		struct mlx5_esw_flow_attr esw_attr[0];
		struct mlx5_nic_flow_attr nic_attr[0];
	};
};

struct mlx5e_tc_flow_parse_attr {
	struct ip_tunnel_info tun_info;
	struct mlx5_flow_spec spec;
	int num_mod_hdr_actions;
	void *mod_hdr_actions;
	int mirred_ifindex;
};

enum {
	MLX5_HEADER_TYPE_VXLAN = 0x0,
	MLX5_HEADER_TYPE_NVGRE = 0x1,
};

#define MLX5E_TC_TABLE_NUM_GROUPS 4
#define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)

struct mlx5e_hairpin {
	struct mlx5_hairpin *pair;

	struct mlx5_core_dev *func_mdev;
	struct mlx5e_priv *func_priv;
	u32 tdn;
	u32 tirn;

	int num_channels;
	struct mlx5e_rqt indir_rqt;
	u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
	struct mlx5e_ttc_table ttc;
};

struct mlx5e_hairpin_entry {
	/* a node of a hash table which keeps all the hairpin entries */
	struct hlist_node hairpin_hlist;

	/* flows sharing the same hairpin */
	struct list_head flows;

	u16 peer_vhca_id;
	u8 prio;
	struct mlx5e_hairpin *hp;
};

struct mod_hdr_key {
	int num_actions;
	void *actions;
};

struct mlx5e_mod_hdr_entry {
	/* a node of a hash table which keeps all the mod_hdr entries */
	struct hlist_node mod_hdr_hlist;

	/* flows sharing the same mod_hdr entry */
	struct list_head flows;

	struct mod_hdr_key key;

	u32 mod_hdr_id;
};

#define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)

static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
{
	return jhash(key->actions,
		     key->num_actions * MLX5_MH_ACT_SZ, 0);
}

static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
				   struct mod_hdr_key *b)
{
	if (a->num_actions != b->num_actions)
		return 1;

	return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
}

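/* Attach a flow to a modify-header context. Contexts are deduplicated:
 * the packed HW rewrite actions are hashed (jhash above) and looked up in
 * the per-namespace table (FDB table for eswitch flows, the NIC TC table
 * otherwise), so flows carrying identical pedit rewrites share a single
 * mod_hdr ID in the device.
 */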
static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
				struct mlx5e_tc_flow *flow,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int num_actions, actions_size, namespace, err;
	struct mlx5e_mod_hdr_entry *mh;
	struct mod_hdr_key key;
	bool found = false;
	u32 hash_key;

	num_actions  = parse_attr->num_mod_hdr_actions;
	actions_size = MLX5_MH_ACT_SZ * num_actions;

	key.actions = parse_attr->mod_hdr_actions;
	key.num_actions = num_actions;

	hash_key = hash_mod_hdr_info(&key);

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		namespace = MLX5_FLOW_NAMESPACE_FDB;
		hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	} else {
		namespace = MLX5_FLOW_NAMESPACE_KERNEL;
		hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
				       mod_hdr_hlist, hash_key) {
			if (!cmp_mod_hdr_info(&mh->key, &key)) {
				found = true;
				break;
			}
		}
	}

	if (found)
		goto attach_flow;

	mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
	if (!mh)
		return -ENOMEM;

	mh->key.actions = (void *)mh + sizeof(*mh);
	memcpy(mh->key.actions, key.actions, actions_size);
	mh->key.num_actions = num_actions;
	INIT_LIST_HEAD(&mh->flows);

	err = mlx5_modify_header_alloc(priv->mdev, namespace,
				       mh->key.num_actions,
				       mh->key.actions,
				       &mh->mod_hdr_id);
	if (err)
		goto out_err;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
	else
		hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);

attach_flow:
	list_add(&flow->mod_hdr, &mh->flows);
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
	else
		flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;

	return 0;

out_err:
	kfree(mh);
	return err;
}

static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
				 struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->mod_hdr.next;

	list_del(&flow->mod_hdr);

	if (list_empty(next)) {
		struct mlx5e_mod_hdr_entry *mh;

		mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);

		mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
		hash_del(&mh->mod_hdr_hlist);
		kfree(mh);
	}
}

static struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
{
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = __dev_get_by_index(net, ifindex);
	priv = netdev_priv(netdev);
	return priv->mdev;
}

static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
{
	u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
	void *tirc;
	int err;

	err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
	if (err)
		goto alloc_tdn_err;

	tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
	MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
	MLX5_SET(tirc, tirc, transport_domain, hp->tdn);

	err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
	if (err)
		goto create_tir_err;

	return 0;

create_tir_err:
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
alloc_tdn_err:
	return err;
}

static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
{
	mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
	mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
}

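/* Spread the hairpin pair RQs over the indirection table RSS-style; for
 * the XOR hash function the index is bit-inverted the same way the
 * regular RX path does it (mlx5e_bits_invert), so both paths pick queues
 * consistently.
 */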
static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
{
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
	struct mlx5e_priv *priv = hp->func_priv;
	int i, ix, sz = MLX5E_INDIR_RQT_SIZE;

	mlx5e_build_default_indir_rqt(indirection_rqt, sz,
				      hp->num_channels);

	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}

static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
{
	int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
	struct mlx5e_priv *priv = hp->func_priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	u32 *in;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);

	err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
	if (!err)
		hp->indir_rqt.enabled = true;

	kvfree(in);
	return err;
}

static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	u32 in[MLX5_ST_SZ_DW(create_tir_in)];
	int tt, i, err;
	void *tirc;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);

		MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
		MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
		MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
		mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);

		err = mlx5_core_create_tir(hp->func_mdev, in,
					   MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
		if (err) {
			mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_tirs;
		}
	}
	return 0;

err_destroy_tirs:
	for (i = 0; i < tt; i++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
	return err;
}

static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
{
	int tt;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
}

static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
					 struct ttc_params *ttc_params)
{
	struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
	int tt;

	memset(ttc_params, 0, sizeof(*ttc_params));

	ttc_params->any_tt_tirn = hp->tirn;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
		ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];

	ft_attr->max_fte = MLX5E_NUM_TT;
	ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
	ft_attr->prio = MLX5E_TC_PRIO;
}

static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;
	struct ttc_params ttc_params;
	int err;

	err = mlx5e_hairpin_create_indirect_rqt(hp);
	if (err)
		return err;

	err = mlx5e_hairpin_create_indirect_tirs(hp);
	if (err)
		goto err_create_indirect_tirs;

	mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
	err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
	if (err)
		goto err_create_ttc_table;

	netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
		   hp->num_channels, hp->ttc.ft.t->id);

	return 0;

err_create_ttc_table:
	mlx5e_hairpin_destroy_indirect_tirs(hp);
err_create_indirect_tirs:
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);

	return err;
}

static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
{
	struct mlx5e_priv *priv = hp->func_priv;

	mlx5e_destroy_ttc_table(priv, &hp->ttc);
	mlx5e_hairpin_destroy_indirect_tirs(hp);
	mlx5e_destroy_rqt(priv, &hp->indir_rqt);
}

static struct mlx5e_hairpin *
mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
		     int peer_ifindex)
{
	struct mlx5_core_dev *func_mdev, *peer_mdev;
	struct mlx5e_hairpin *hp;
	struct mlx5_hairpin *pair;
	int err;

	hp = kzalloc(sizeof(*hp), GFP_KERNEL);
	if (!hp)
		return ERR_PTR(-ENOMEM);

	func_mdev = priv->mdev;
	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);

	pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
	if (IS_ERR(pair)) {
		err = PTR_ERR(pair);
		goto create_pair_err;
	}
	hp->pair = pair;
	hp->func_mdev = func_mdev;
	hp->func_priv = priv;
	hp->num_channels = params->num_channels;

	err = mlx5e_hairpin_create_transport(hp);
	if (err)
		goto create_transport_err;

	if (hp->num_channels > 1) {
		err = mlx5e_hairpin_rss_init(hp);
		if (err)
			goto rss_init_err;
	}

	return hp;

rss_init_err:
	mlx5e_hairpin_destroy_transport(hp);
create_transport_err:
	mlx5_core_hairpin_destroy(hp->pair);
create_pair_err:
	kfree(hp);
	return ERR_PTR(err);
}

static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
{
	if (hp->num_channels > 1)
		mlx5e_hairpin_rss_cleanup(hp);
	mlx5e_hairpin_destroy_transport(hp);
	mlx5_core_hairpin_destroy(hp->pair);
}

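/* Hairpin entries are keyed by the peer's vhca id and the matched PCP
 * priority, so all flows towards the same peer device with the same
 * priority reuse one hairpin pair.
 */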
static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
{
	return (peer_vhca_id << 16 | prio);
}

static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
						     u16 peer_vhca_id, u8 prio)
{
	struct mlx5e_hairpin_entry *hpe;
	u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);

	hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
			       hairpin_hlist, hash_key) {
		if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
			return hpe;
	}

	return NULL;
}

#define UNKNOWN_MATCH_PRIO 8

static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
				  struct mlx5_flow_spec *spec, u8 *match_prio)
{
	void *headers_c, *headers_v;
	u8 prio_val, prio_mask = 0;
	bool vlan_present;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
		netdev_warn(priv->netdev,
			    "only PCP trust state supported for hairpin\n");
		return -EOPNOTSUPP;
	}
#endif
	headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);

	vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
	if (vlan_present) {
		prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
		prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
	}

	if (!vlan_present || !prio_mask) {
		prio_val = UNKNOWN_MATCH_PRIO;
	} else if (prio_mask != 0x7) {
		netdev_warn(priv->netdev,
			    "masked priority match not supported for hairpin\n");
		return -EOPNOTSUPP;
	}

	*match_prio = prio_val;
	return 0;
}

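/* Set up (or reuse) the hairpin pair for this flow. Queue sizing is
 * derived from device caps: log_data_size starts at 15 (32KB) and is
 * clamped into [log_min, log_max]_hairpin_wq_data_sz. One hairpin channel
 * is allocated per 50Gbs share of the port speed; for example (the
 * arithmetic below), a 100000Mbps link gives 100000 / 50000 = 2 channels,
 * and more than one channel turns on hairpin RSS.
 */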
static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow,
				  struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int peer_ifindex = parse_attr->mirred_ifindex;
	struct mlx5_hairpin_params params;
	struct mlx5_core_dev *peer_mdev;
	struct mlx5e_hairpin_entry *hpe;
	struct mlx5e_hairpin *hp;
	u64 link_speed64;
	u32 link_speed;
	u8 match_prio;
	u16 peer_id;
	int err;

	peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
	if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
		netdev_warn(priv->netdev, "hairpin is not supported\n");
		return -EOPNOTSUPP;
	}

	peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
	err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
	if (err)
		return err;
	hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
	if (hpe)
		goto attach_flow;

	hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
	if (!hpe)
		return -ENOMEM;

	INIT_LIST_HEAD(&hpe->flows);
	hpe->peer_vhca_id = peer_id;
	hpe->prio = match_prio;

	params.log_data_size = 15;
	params.log_data_size = min_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
	params.log_data_size = max_t(u8, params.log_data_size,
				     MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));

	params.log_num_packets = params.log_data_size -
				 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
	params.log_num_packets = min_t(u8, params.log_num_packets,
				       MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));

	params.q_counter = priv->q_counter;
	/* set hairpin pair per each 50Gbs share of the link */
	mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
	link_speed = max_t(u32, link_speed, 50000);
	link_speed64 = link_speed;
	do_div(link_speed64, 50000);
	params.num_channels = link_speed64;

	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
	if (IS_ERR(hp)) {
		err = PTR_ERR(hp);
		goto create_hairpin_err;
	}

	netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
		   hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
		   hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);

	hpe->hp = hp;
	hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
		 hash_hairpin_info(peer_id, match_prio));

attach_flow:
	if (hpe->hp->num_channels > 1) {
		flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
		flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
	} else {
		flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
	}
	list_add(&flow->hairpin, &hpe->flows);

	return 0;

create_hairpin_err:
	kfree(hpe);
	return err;
}

static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
				   struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->hairpin.next;

	list_del(&flow->hairpin);

	/* no more hairpin flows for us, release the hairpin pair */
	if (list_empty(next)) {
		struct mlx5e_hairpin_entry *hpe;

		hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);

		netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
			   hpe->hp->pair->peer_mdev->priv.name);

		mlx5e_hairpin_destroy(hpe->hp);
		hash_del(&hpe->hairpin_hlist);
		kfree(hpe);
	}
}

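/* Offload a NIC (non-eswitch) flow. The destination list is built in
 * order: the forwarding destination first (the hairpin TTC table or TIR
 * for hairpin flows, the vlan table for a regular forward), then an
 * optional flow counter destination.
 */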
static struct mlx5_flow_handle *
mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_flow_destination dest[2] = {};
	struct mlx5_flow_act flow_act = {
		.action = attr->action,
		.has_flow_tag = true,
		.flow_tag = attr->flow_tag,
		.encap_id = 0,
	};
	struct mlx5_fc *counter = NULL;
	struct mlx5_flow_handle *rule;
	bool table_created = false;
	int err, dest_ix = 0;

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
		err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
		if (err) {
			rule = ERR_PTR(err);
			goto err_add_hairpin_flow;
		}
		if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
			dest[dest_ix].ft = attr->hairpin_ft;
		} else {
			dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
			dest[dest_ix].tir_num = attr->hairpin_tirn;
		}
		dest_ix++;
	} else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		dest[dest_ix].ft = priv->fs.vlan.ft.t;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
		counter = mlx5_fc_create(dev, true);
		if (IS_ERR(counter)) {
			rule = ERR_CAST(counter);
			goto err_fc_create;
		}
		dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
		dest[dest_ix].counter = counter;
		dest_ix++;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		flow_act.modify_id = attr->mod_hdr_id;
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_create_mod_hdr_id;
		}
	}

	if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
		int tc_grp_size, tc_tbl_size;
		u32 max_flow_counter;

		max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
				    MLX5_CAP_GEN(dev, max_flow_counter_15_0);

		tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);

		tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
				    BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));

		priv->fs.tc.t =
			mlx5_create_auto_grouped_flow_table(priv->fs.ns,
							    MLX5E_TC_PRIO,
							    tc_tbl_size,
							    MLX5E_TC_TABLE_NUM_GROUPS,
							    MLX5E_TC_FT_LEVEL, 0);
		if (IS_ERR(priv->fs.tc.t)) {
			netdev_err(priv->netdev,
				   "Failed to create tc offload table\n");
			rule = ERR_CAST(priv->fs.tc.t);
			goto err_create_ft;
		}

		table_created = true;
	}

	if (attr->match_level != MLX5_MATCH_NONE)
		parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;

	rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
				   &flow_act, dest, dest_ix);

	if (IS_ERR(rule))
		goto err_add_rule;

	return rule;

err_add_rule:
	if (table_created) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}
err_create_ft:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_create_mod_hdr_id:
	mlx5_fc_destroy(dev, counter);
err_fc_create:
	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
err_add_hairpin_flow:
	return rule;
}

static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	struct mlx5_fc *counter = NULL;

	counter = mlx5_flow_rule_counter(flow->rule[0]);
	mlx5_del_flow_rules(flow->rule[0]);
	mlx5_fc_destroy(priv->mdev, counter);

	if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
		mlx5_destroy_flow_table(priv->fs.tc.t);
		priv->fs.tc.t = NULL;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);

	if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
		mlx5e_hairpin_flow_del(priv, flow);
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow);

static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow);

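/* Offload an eswitch (FDB) flow. mlx5e_attach_encap() may return -EAGAIN
 * when the tunnel neighbour isn't valid yet; in that case the HW rule is
 * not added here and mlx5e_tc_encap_flows_add() installs it once the
 * neighbour resolves.
 */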
static struct mlx5_flow_handle *
mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
		      struct mlx5e_tc_flow_parse_attr *parse_attr,
		      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct net_device *out_dev, *encap_dev = NULL;
	struct mlx5_flow_handle *rule = NULL;
	struct mlx5e_rep_priv *rpriv;
	struct mlx5e_priv *out_priv;
	int err;

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		out_dev = __dev_get_by_index(dev_net(priv->netdev),
					     attr->parse_attr->mirred_ifindex);
		err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
					 out_dev, &encap_dev, flow);
		if (err) {
			rule = ERR_PTR(err);
			if (err != -EAGAIN)
				goto err_attach_encap;
		}
		out_priv = netdev_priv(encap_dev);
		rpriv = out_priv->ppriv;
		attr->out_rep[attr->out_count] = rpriv->rep;
		attr->out_mdev[attr->out_count++] = out_priv->mdev;
	}

	err = mlx5_eswitch_add_vlan_action(esw, attr);
	if (err) {
		rule = ERR_PTR(err);
		goto err_add_vlan;
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
		err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
		kfree(parse_attr->mod_hdr_actions);
		if (err) {
			rule = ERR_PTR(err);
			goto err_mod_hdr;
		}
	}

	/* we get here if (1) there's no error (rule being null) or when
	 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
	 */
	if (rule != ERR_PTR(-EAGAIN)) {
		rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
		if (IS_ERR(rule))
			goto err_add_rule;

		if (attr->mirror_count) {
			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
			if (IS_ERR(flow->rule[1]))
				goto err_fwd_rule;
		}
	}
	return rule;

err_fwd_rule:
	mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
	rule = flow->rule[1];
err_add_rule:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
err_mod_hdr:
	mlx5_eswitch_del_vlan_action(esw, attr);
err_add_vlan:
	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
		mlx5e_detach_encap(priv, flow);
err_attach_encap:
	return rule;
}

static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
				  struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;

	if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
		flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
		if (attr->mirror_count)
			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
		mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
	}

	mlx5_eswitch_del_vlan_action(esw, attr);

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
		mlx5e_detach_encap(priv, flow);
		kvfree(attr->parse_attr);
	}

	if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		mlx5e_detach_mod_hdr(priv, flow);
}

void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5_esw_flow_attr *esw_attr;
	struct mlx5e_tc_flow *flow;
	int err;

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       e->encap_size, e->encap_header,
			       &e->encap_id);
	if (err) {
		mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
			       err);
		return;
	}
	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(priv);

	list_for_each_entry(flow, &e->flows, encap) {
		esw_attr = flow->esw_attr;
		esw_attr->encap_id = e->encap_id;
		flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
		if (IS_ERR(flow->rule[0])) {
			err = PTR_ERR(flow->rule[0]);
			mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
				       err);
			continue;
		}

		if (esw_attr->mirror_count) {
			flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
			if (IS_ERR(flow->rule[1])) {
				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
				err = PTR_ERR(flow->rule[1]);
				mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
					       err);
				continue;
			}
		}

		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
	}
}

void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
			      struct mlx5e_encap_entry *e)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow *flow;

	list_for_each_entry(flow, &e->flows, encap) {
		if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
			struct mlx5_esw_flow_attr *attr = flow->esw_attr;

			flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
			if (attr->mirror_count)
				mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
			mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
		}
	}

	if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
		e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
		mlx5_encap_dealloc(priv->mdev, e->encap_id);
	}
}

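/* Poll the cached counters of every offloaded flow using this neigh
 * entry; if any of them passed traffic since the last report, kick the
 * neighbour (neigh_event_send) so it is kept reachable while the HW
 * keeps using it for encapsulation.
 */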
void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
{
	struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
	u64 bytes, packets, lastuse = 0;
	struct mlx5e_tc_flow *flow;
	struct mlx5e_encap_entry *e;
	struct mlx5_fc *counter;
	struct neigh_table *tbl;
	bool neigh_used = false;
	struct neighbour *n;

	if (m_neigh->family == AF_INET)
		tbl = &arp_tbl;
#if IS_ENABLED(CONFIG_IPV6)
	else if (m_neigh->family == AF_INET6)
		tbl = &nd_tbl;
#endif
	else
		return;

	list_for_each_entry(e, &nhe->encap_list, encap_list) {
		if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
			continue;
		list_for_each_entry(flow, &e->flows, encap) {
			if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
				counter = mlx5_flow_rule_counter(flow->rule[0]);
				mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
				if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
					neigh_used = true;
					break;
				}
			}
		}
		if (neigh_used)
			break;
	}

	if (neigh_used) {
		nhe->reported_lastuse = jiffies;

		/* find the relevant neigh according to the cached device and
		 * dst ip pair
		 */
		n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
		if (!n)
			return;

		neigh_event_send(n, NULL);
		neigh_release(n);
	}
}

static void mlx5e_detach_encap(struct mlx5e_priv *priv,
			       struct mlx5e_tc_flow *flow)
{
	struct list_head *next = flow->encap.next;

	list_del(&flow->encap);
	if (list_empty(next)) {
		struct mlx5e_encap_entry *e;

		e = list_entry(next, struct mlx5e_encap_entry, flows);
		mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);

		if (e->flags & MLX5_ENCAP_ENTRY_VALID)
			mlx5_encap_dealloc(priv->mdev, e->encap_id);

		hash_del_rcu(&e->encap_hlist);
		kfree(e->encap_header);
		kfree(e);
	}
}

static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
			      struct mlx5e_tc_flow *flow)
{
	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		mlx5e_tc_del_fdb_flow(priv, flow);
	else
		mlx5e_tc_del_nic_flow(priv, flow);
}

static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);

	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->key);
		struct flow_dissector_key_keyid *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_KEYID,
						  f->mask);
		MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
			 be32_to_cpu(mask->keyid));
		MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
			 be32_to_cpu(key->keyid));
	}
}

static int parse_tunnel_attr(struct mlx5e_priv *priv,
			     struct mlx5_flow_spec *spec,
			     struct tc_cls_flower_offload *f)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);

	struct flow_dissector_key_control *enc_control =
		skb_flow_dissector_target(f->dissector,
					  FLOW_DISSECTOR_KEY_ENC_CONTROL,
					  f->key);

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_PORTS,
						  f->mask);
		struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
		struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		struct net_device *up_dev = uplink_rpriv->netdev;
		struct mlx5e_priv *up_priv = netdev_priv(up_dev);

		/* Full udp dst port must be given */
		if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
			goto vxlan_match_offload_err;

		if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
		    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
			parse_vxlan_attr(spec, f);
		else {
			netdev_warn(priv->netdev,
				    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
			return -EOPNOTSUPP;
		}

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_dport, ntohs(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_dport, ntohs(key->dst));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 udp_sport, ntohs(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 udp_sport, ntohs(key->src));
	} else { /* udp dst port must be given */
vxlan_match_offload_err:
		netdev_warn(priv->netdev,
			    "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
		return -EOPNOTSUPP;
	}

	if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->src));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 src_ipv4_src_ipv6.ipv4_layout.ipv4,
			 ntohl(key->src));

		MLX5_SET(fte_match_set_lyr_2_4, headers_c,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(mask->dst));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v,
			 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
			 ntohl(key->dst));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
	} else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));

		MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
	}

	/* Enforce DMAC when offloading incoming tunneled flows.
	 * Flow counters require a match on the DMAC.
	 */
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
	MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
	ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				     dmac_47_16), priv->netdev->dev_addr);

	/* let software handle IP fragments */
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);

	return 0;
}

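/* Translate a flower classifier match into an mlx5 flow spec. As an
 * illustration only, a filter such as:
 *   tc filter add dev eth0 ingress protocol ip flower \
 *	ip_proto tcp dst_port 80 action drop
 * becomes outer_headers matches on ethertype, ip_protocol and tcp_dport,
 * with *match_level reporting the deepest layer used (L2..L4) so callers
 * can validate it against the eswitch min-inline mode.
 */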
static int __parse_cls_flower(struct mlx5e_priv *priv,
			      struct mlx5_flow_spec *spec,
			      struct tc_cls_flower_offload *f,
			      u8 *match_level)
{
	void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				       outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				       outer_headers);
	void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
				    misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
				    misc_parameters);
	u16 addr_type = 0;
	u8 ip_proto = 0;

	*match_level = MLX5_MATCH_NONE;

	if (f->dissector->used_keys &
	    ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_VLAN) |
	      BIT(FLOW_DISSECTOR_KEY_CVLAN) |
	      BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
	      BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
	      BIT(FLOW_DISSECTOR_KEY_TCP) |
	      BIT(FLOW_DISSECTOR_KEY_IP))) {
		netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
			    f->dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if ((dissector_uses_key(f->dissector,
				FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
	     dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
	    dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ENC_CONTROL,
						  f->key);
		switch (key->addr_type) {
		case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
		case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
			if (parse_tunnel_attr(priv, spec, f))
				return -EOPNOTSUPP;
			break;
		default:
			return -EOPNOTSUPP;
		}

		/* In decap flow, header pointers should point to the inner
		 * headers, outer header were already set by parse_tunnel_attr
		 */
		headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
					 inner_headers);
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->key);
		struct flow_dissector_key_eth_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_ETH_ADDRS,
						  f->mask);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     dmac_47_16),
				mask->dst);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     dmac_47_16),
				key->dst);

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
					     smac_47_16),
				mask->src);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
					     smac_47_16),
				key->src);

		if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_VLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 svlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_lyr_2_4, headers_c,
					 cvlan_tag, 1);
				MLX5_SET(fte_match_set_lyr_2_4, headers_v,
					 cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);

			MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
		struct flow_dissector_key_vlan *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->key);
		struct flow_dissector_key_vlan *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CVLAN,
						  f->mask);
		if (mask->vlan_id || mask->vlan_priority || mask->vlan_tpid) {
			if (key->vlan_tpid == htons(ETH_P_8021AD)) {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_svlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_svlan_tag, 1);
			} else {
				MLX5_SET(fte_match_set_misc, misc_c,
					 outer_second_cvlan_tag, 1);
				MLX5_SET(fte_match_set_misc, misc_v,
					 outer_second_cvlan_tag, 1);
			}

			MLX5_SET(fte_match_set_misc, misc_c, outer_second_vid,
				 mask->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_vid,
				 key->vlan_id);
			MLX5_SET(fte_match_set_misc, misc_c, outer_second_prio,
				 mask->vlan_priority);
			MLX5_SET(fte_match_set_misc, misc_v, outer_second_prio,
				 key->vlan_priority);

			*match_level = MLX5_MATCH_L2;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
			 ntohs(mask->n_proto));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
			 ntohs(key->n_proto));

		if (mask->n_proto)
			*match_level = MLX5_MATCH_L2;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->key);

		struct flow_dissector_key_control *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_CONTROL,
						  f->mask);
		addr_type = key->addr_type;

		/* the HW doesn't support frag first/later */
		if (mask->flags & FLOW_DIS_FIRST_FRAG)
			return -EOPNOTSUPP;

		if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
			MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
				 key->flags & FLOW_DIS_IS_FRAGMENT);

			/* the HW doesn't need L3 inline to match on frag=no */
			if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
				*match_level = MLX5_INLINE_MODE_L2;
	/* ***  L2 attributes parsing up to here *** */
			else
				*match_level = MLX5_INLINE_MODE_IP;
		}
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->key);
		struct flow_dissector_key_basic *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_BASIC,
						  f->mask);
		ip_proto = key->ip_proto;

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
			 mask->ip_proto);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 key->ip_proto);

		if (mask->ip_proto)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		struct flow_dissector_key_ipv4_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv4_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV4_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &key->src, sizeof(key->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &key->dst, sizeof(key->dst));

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L3;
	}

	if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		struct flow_dissector_key_ipv6_addrs *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->key);
		struct flow_dissector_key_ipv6_addrs *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IPV6_ADDRS,
						  f->mask);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &mask->src, sizeof(mask->src));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    src_ipv4_src_ipv6.ipv6_layout.ipv6),
		       &key->src, sizeof(key->src));

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &mask->dst, sizeof(mask->dst));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
				    dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
		       &key->dst, sizeof(key->dst));

		if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
		    ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
			*match_level = MLX5_MATCH_L3;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
		struct flow_dissector_key_ip *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->key);
		struct flow_dissector_key_ip *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_IP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);

		if (mask->ttl &&
		    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
						ft_field_support.outer_ipv4_ttl))
			return -EOPNOTSUPP;

		if (mask->tos || mask->ttl)
			*match_level = MLX5_MATCH_L3;
	}

	/* ***  L3 attributes parsing up to here *** */

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->key);
		struct flow_dissector_key_ports *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_PORTS,
						  f->mask);
		switch (ip_proto) {
		case IPPROTO_TCP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 tcp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 tcp_dport, ntohs(key->dst));
			break;

		case IPPROTO_UDP:
			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_sport, ntohs(mask->src));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_sport, ntohs(key->src));

			MLX5_SET(fte_match_set_lyr_2_4, headers_c,
				 udp_dport, ntohs(mask->dst));
			MLX5_SET(fte_match_set_lyr_2_4, headers_v,
				 udp_dport, ntohs(key->dst));
			break;
		default:
			netdev_err(priv->netdev,
				   "Only UDP and TCP transport are supported\n");
			return -EINVAL;
		}

		if (mask->src || mask->dst)
			*match_level = MLX5_MATCH_L4;
	}

	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
		struct flow_dissector_key_tcp *key =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->key);
		struct flow_dissector_key_tcp *mask =
			skb_flow_dissector_target(f->dissector,
						  FLOW_DISSECTOR_KEY_TCP,
						  f->mask);

		MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
			 ntohs(mask->flags));
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
			 ntohs(key->flags));

		if (mask->flags)
			*match_level = MLX5_MATCH_L4;
	}

	return 0;
}

static int parse_cls_flower(struct mlx5e_priv *priv,
			    struct mlx5e_tc_flow *flow,
			    struct mlx5_flow_spec *spec,
			    struct tc_cls_flower_offload *f)
{
	struct mlx5_core_dev *dev = priv->mdev;
	struct mlx5_eswitch *esw = dev->priv.eswitch;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct mlx5_eswitch_rep *rep;
	u8 match_level;
	int err;

	err = __parse_cls_flower(priv, spec, f, &match_level);

	if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
		rep = rpriv->rep;
		if (rep->vport != FDB_UPLINK_VPORT &&
		    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
		     esw->offloads.inline_mode < match_level)) {
			netdev_warn(priv->netdev,
				    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
				    match_level, esw->offloads.inline_mode);
			return -EOPNOTSUPP;
		}
	}

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		flow->esw_attr->match_level = match_level;
	else
		flow->nic_attr->match_level = match_level;

	return err;
}

struct pedit_headers {
	struct ethhdr  eth;
	struct iphdr   ip4;
	struct ipv6hdr ip6;
	struct tcphdr  tcp;
	struct udphdr  udp;
};

static int pedit_header_offsets[] = {
	[TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
	[TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
};

#define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])

static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
			 struct pedit_headers *masks,
			 struct pedit_headers *vals)
{
	u32 *curr_pmask, *curr_pval;

	if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
		goto out_err;

	curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
	curr_pval  = (u32 *)(pedit_header(vals, hdr_type) + offset);

	if (*curr_pmask & mask)  /* disallow acting twice on the same location */
		goto out_err;

	*curr_pmask |= mask;
	*curr_pval  |= (val & mask);

	return 0;

out_err:
	return -EOPNOTSUPP;
}

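/* Each entry in the table below maps a span of struct pedit_headers (the
 * SW layout of the packed headers above) to the matching device rewrite
 * field, so a parsed pedit mask/value can be copied straight into a HW
 * modify-header action.
 */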
struct mlx5_fields {
	u8  field;
	u8  size;
	u32 offset;
};

#define OFFLOAD(fw_field, size, field, off) \
		{MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}

static struct mlx5_fields fields[] = {
	OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
	OFFLOAD(DMAC_15_0,  2, eth.h_dest[4], 0),
	OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
	OFFLOAD(SMAC_15_0,  2, eth.h_source[4], 0),
	OFFLOAD(ETHERTYPE,  2, eth.h_proto, 0),

	OFFLOAD(IP_TTL, 1, ip4.ttl,   0),
	OFFLOAD(SIPV4,  4, ip4.saddr, 0),
	OFFLOAD(DIPV4,  4, ip4.daddr, 0),

	OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
	OFFLOAD(SIPV6_95_64,  4, ip6.saddr.s6_addr32[1], 0),
	OFFLOAD(SIPV6_63_32,  4, ip6.saddr.s6_addr32[2], 0),
	OFFLOAD(SIPV6_31_0,   4, ip6.saddr.s6_addr32[3], 0),
	OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
	OFFLOAD(DIPV6_95_64,  4, ip6.daddr.s6_addr32[1], 0),
	OFFLOAD(DIPV6_63_32,  4, ip6.daddr.s6_addr32[2], 0),
	OFFLOAD(DIPV6_31_0,   4, ip6.daddr.s6_addr32[3], 0),
	OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),

	OFFLOAD(TCP_SPORT, 2, tcp.source,  0),
	OFFLOAD(TCP_DPORT, 2, tcp.dest,    0),
	OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),

	OFFLOAD(UDP_SPORT, 2, udp.source, 0),
	OFFLOAD(UDP_DPORT, 2, udp.dest,   0),
};

/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed at
 * max from the SW pedit action. On success, it says how many HW actions were
 * actually parsed.
 */
static int offload_pedit_fields(struct pedit_headers *masks,
				struct pedit_headers *vals,
				struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
	int i, action_size, nactions, max_actions, first, last, next_z;
	void *s_masks_p, *a_masks_p, *vals_p;
	struct mlx5_fields *f;
	u8 cmd, field_bsize;
	u32 s_mask, a_mask;
	unsigned long mask;
	__be32 mask_be32;
	__be16 mask_be16;
	void *action;

	set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
	add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
	set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
	add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];

	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
	action = parse_attr->mod_hdr_actions;
	max_actions = parse_attr->num_mod_hdr_actions;
	nactions = 0;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		/* avoid seeing bits set from previous iterations */
		s_mask = 0;
		a_mask = 0;

		s_masks_p = (void *)set_masks + f->offset;
		a_masks_p = (void *)add_masks + f->offset;

		memcpy(&s_mask, s_masks_p, f->size);
		memcpy(&a_mask, a_masks_p, f->size);

		if (!s_mask && !a_mask) /* nothing to offload here */
			continue;

		if (s_mask && a_mask) {
			printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
			return -EOPNOTSUPP;
		}

		if (nactions == max_actions) {
			printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
			return -EOPNOTSUPP;
		}

		if (s_mask) {
			cmd  = MLX5_ACTION_TYPE_SET;
			mask = s_mask;
			vals_p = (void *)set_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(s_masks_p, 0, f->size);
		} else {
			cmd  = MLX5_ACTION_TYPE_ADD;
			mask = a_mask;
			vals_p = (void *)add_vals + f->offset;
			/* clear to denote we consumed this field */
			memset(a_masks_p, 0, f->size);
		}

		field_bsize = f->size * BITS_PER_BYTE;

		if (field_bsize == 32) {
			mask_be32 = *(__be32 *)&mask;
			mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
		} else if (field_bsize == 16) {
			mask_be16 = *(__be16 *)&mask;
			mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
		}

		first = find_first_bit(&mask, field_bsize);
		next_z = find_next_zero_bit(&mask, field_bsize, first);
		last  = find_last_bit(&mask, field_bsize);
		if (first < next_z && next_z < last) {
			printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
			       mask);
			return -EOPNOTSUPP;
		}

		MLX5_SET(set_action_in, action, action_type, cmd);
		MLX5_SET(set_action_in, action, field, f->field);

		if (cmd == MLX5_ACTION_TYPE_SET) {
			MLX5_SET(set_action_in, action, offset, first);
			/* length is num of bits to be written, zero means length of 32 */
			MLX5_SET(set_action_in, action, length, (last - first + 1));
		}

		if (field_bsize == 32)
			MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
		else if (field_bsize == 16)
			MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
		else if (field_bsize == 8)
			MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);

		action += action_size;
		nactions++;
	}

	parse_attr->num_mod_hdr_actions = nactions;
	return 0;
}

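/* Reserve room for the HW modify-header actions. A single 32 bit pedit
 * key can expand to multiple HW actions, so the buffer is sized at
 * nkeys * 16, bounded by the device's max_modify_header_actions cap for
 * the relevant namespace.
 */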
static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	int nkeys, action_size, max_actions;

	nkeys = tcf_pedit_nkeys(a);
	action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);

	if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
		max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
	else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
		max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);

	/* can get up to crazily 16 HW actions in 32 bits pedit SW key */
	max_actions = min(max_actions, nkeys * 16);

	parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
	if (!parse_attr->mod_hdr_actions)
		return -ENOMEM;

	parse_attr->num_mod_hdr_actions = max_actions;
	return 0;
}

static const struct pedit_headers zero_masks = {};

static int parse_tc_pedit_action(struct mlx5e_priv *priv,
				 const struct tc_action *a, int namespace,
				 struct mlx5e_tc_flow_parse_attr *parse_attr)
{
	struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
	int nkeys, i, err = -EOPNOTSUPP;
	u32 mask, val, offset;
	u8 cmd, htype;

	nkeys = tcf_pedit_nkeys(a);

	memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
	memset(vals,  0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);

	for (i = 0; i < nkeys; i++) {
		htype = tcf_pedit_htype(a, i);
		cmd = tcf_pedit_cmd(a, i);
		err = -EOPNOTSUPP; /* can't be all optimistic */

		if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
			netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
			goto out_err;
		}

		if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
			netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
			goto out_err;
		}

		mask = tcf_pedit_mask(a, i);
		val = tcf_pedit_val(a, i);
		offset = tcf_pedit_offset(a, i);

		err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
		if (err)
			goto out_err;
	}

	err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
	if (err)
		goto out_err;

	err = offload_pedit_fields(masks, vals, parse_attr);
	if (err < 0)
		goto out_dealloc_parsed_actions;

	for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
		cmd_masks = &masks[cmd];
		if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
			netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
			print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
				       16, 1, cmd_masks, sizeof(zero_masks), true);
			err = -EOPNOTSUPP;
			goto out_dealloc_parsed_actions;
		}
	}

	return 0;

out_dealloc_parsed_actions:
	kfree(parse_attr->mod_hdr_actions);
out_err:
	return err;
}

static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
{
	u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
			 TCA_CSUM_UPDATE_FLAG_UDP;

	/*  The HW recalcs checksums only if re-writing headers */
	if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
		netdev_warn(priv->netdev,
			    "TC csum action is only offloaded with pedit\n");
		return false;
	}

	if (update_flags & ~prot_flags) {
		netdev_warn(priv->netdev,
			    "can't offload TC csum action for some header/s - flags %#x\n",
			    update_flags);
		return false;
	}

	return true;
}

static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
					  struct tcf_exts *exts)
{
	const struct tc_action *a;
	bool modify_ip_header;
	LIST_HEAD(actions);
	u8 htype, ip_proto;
	void *headers_v;
	u16 ethertype;
	int nkeys, i;

	headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
	ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);

	/* for non-IP we only re-write MACs, so we're okay */
	if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
		goto out_ok;

	modify_ip_header = false;
	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (!is_tcf_pedit(a))
			continue;

		nkeys = tcf_pedit_nkeys(a);
		for (i = 0; i < nkeys; i++) {
			htype = tcf_pedit_htype(a, i);
			if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
			    htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
				modify_ip_header = true;
				break;
			}
		}
	}

	ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
	if (modify_ip_header && ip_proto != IPPROTO_TCP &&
	    ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
		pr_info("can't offload re-write of ip proto %d\n", ip_proto);
		return false;
	}

out_ok:
	return true;
}

static bool actions_match_supported(struct mlx5e_priv *priv,
				    struct tcf_exts *exts,
				    struct mlx5e_tc_flow_parse_attr *parse_attr,
				    struct mlx5e_tc_flow *flow)
{
	u32 actions;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
		actions = flow->esw_attr->action;
	else
		actions = flow->nic_attr->action;

	if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
		return modify_header_match_supported(&parse_attr->spec, exts);

	return true;
}

static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
{
	struct mlx5_core_dev *fmdev, *pmdev;
	u16 func_id, peer_id;

	fmdev = priv->mdev;
	pmdev = peer_priv->mdev;

	func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
	peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));

	return (func_id == peer_id);
}

static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_nic_flow_attr *attr = flow->nic_attr;
	const struct tc_action *a;
	LIST_HEAD(actions);
	u32 action = 0;
	int err;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
			if (MLX5_CAP_FLOWTABLE(priv->mdev,
					       flow_table_properties_nic_receive.flow_counter))
				action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
						    parse_attr);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
				  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a)) {
			struct net_device *peer_dev = tcf_mirred_dev(a);

			if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
			    same_hw_devs(priv, netdev_priv(peer_dev))) {
				parse_attr->mirred_ifindex = peer_dev->ifindex;
				flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			} else {
				netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
					    peer_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_skbedit_mark(a)) {
			u32 mark = tcf_skbedit_mark(a);

			if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
				netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
					    mark);
				return -EINVAL;
			}

			attr->flow_tag = mark;
			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	return 0;
}

static inline int cmp_encap_info(struct ip_tunnel_key *a,
				 struct ip_tunnel_key *b)
{
	return memcmp(a, b, sizeof(*a));
}

static inline int hash_encap_info(struct ip_tunnel_key *key)
{
	return jhash(key, sizeof(*key), 0);
}

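/* Resolve the tunnel route and neighbour for the encap destination. If
 * the egress device does not share the eswitch with this port, traffic
 * is sent via the uplink representor instead.
 */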
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi4 *fl4,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;
	struct rtable *rt;
	struct neighbour *n = NULL;

#if IS_ENABLED(CONFIG_INET)
	int ret;

	rt = ip_route_output_key(dev_net(mirred_dev), fl4);
	ret = PTR_ERR_OR_ZERO(rt);
	if (ret)
		return ret;
#else
	return -EOPNOTSUPP;
#endif
	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = rt->dst.dev;

	*out_ttl = ip4_dst_hoplimit(&rt->dst);
	n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
	ip_rt_put(rt);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
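/* With the merged_eswitch capability, the peer port's representors are
 * valid forwarding destinations too: the peer must be an mlx5e port of
 * the same NIC, a vport manager, and in switchdev (SRIOV_OFFLOADS) mode.
 */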
static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
				  struct net_device *peer_netdev)
{
	struct mlx5e_priv *peer_priv;

	peer_priv = netdev_priv(peer_netdev);

	return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
		(priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
		same_hw_devs(priv, peer_priv) &&
		MLX5_VPORT_MANAGER(peer_priv->mdev) &&
		(peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
}
static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
				   struct net_device *mirred_dev,
				   struct net_device **out_dev,
				   struct flowi6 *fl6,
				   struct neighbour **out_n,
				   int *out_ttl)
{
	struct neighbour *n = NULL;
	struct dst_entry *dst;

#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
	struct mlx5e_rep_priv *uplink_rpriv;
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	int ret;

	ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
					 fl6);
	if (ret < 0)
		return ret;

	*out_ttl = ip6_dst_hoplimit(dst);

	uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
	/* if the egress device isn't on the same HW e-switch, we use the uplink */
	if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
		*out_dev = uplink_rpriv->netdev;
	else
		*out_dev = dst->dev;
#else
	return -EOPNOTSUPP;
#endif

	n = dst_neigh_lookup(dst, &fl6->daddr);
	dst_release(dst);
	if (!n)
		return -ENOMEM;

	*out_n = n;
	return 0;
}
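/* Build the raw header block the device prepends on encap:
 * Ethernet | IPv4/IPv6 | UDP | VXLAN. Length and checksum fields are
 * left zeroed for the hardware to fill in per packet.
 */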
static void gen_vxlan_header_ipv4(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  __be32 daddr,
				  __be32 saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct iphdr  *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IP);

	ip->daddr = daddr;
	ip->saddr = saddr;

	ip->ttl = ttl;
	ip->protocol = IPPROTO_UDP;
	ip->version = 0x4;
	ip->ihl = 0x5;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
static void gen_vxlan_header_ipv6(struct net_device *out_dev,
				  char buf[], int encap_size,
				  unsigned char h_dest[ETH_ALEN],
				  int ttl,
				  struct in6_addr *daddr,
				  struct in6_addr *saddr,
				  __be16 udp_dst_port,
				  __be32 vx_vni)
{
	struct ethhdr *eth = (struct ethhdr *)buf;
	struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
	struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
	struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));

	memset(buf, 0, encap_size);

	ether_addr_copy(eth->h_dest, h_dest);
	ether_addr_copy(eth->h_source, out_dev->dev_addr);
	eth->h_proto = htons(ETH_P_IPV6);

	ip6_flow_hdr(ip6h, 0, 0);
	/* the HW fills up the ipv6 payload len */
	ip6h->nexthdr = IPPROTO_UDP;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;

	udp->dest = udp_dst_port;
	vxh->vx_flags = VXLAN_HF_VNI;
	vxh->vx_vni = vxlan_vni_field(vx_vni);
}
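/* Resolve everything needed to offload an IPv4 VXLAN encap: route the
 * tunnel destination, attach the neigh to the representor's neigh
 * table, snapshot its MAC and program the header into the device. If
 * the neigh isn't NUD_VALID yet, neighbour resolution is kicked and
 * -EAGAIN is returned so the flow gets offloaded from the neigh update
 * path once the entry turns valid. The IPv6 variant below mirrors this.
 */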
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi4 fl4 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv4_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv4_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl4.flowi4_proto = IPPROTO_UDP;
		fl4.fl4_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl4.flowi4_tos = tun_key->tos;
	fl4.daddr = tun_key->u.ipv4.dst;
	fl4.saddr = tun_key->u.ipv4.src;

	err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
				      &fl4, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to look up this entry in the neigh
	 * hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state. That way, if we get a notification that the
	 * neigh changed its validity state, we can find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv4(out_dev, encap_header,
				      ipv4_encap_size, e->h_dest, ttl,
				      fl4.daddr,
				      fl4.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv4_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv4_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
					  struct net_device *mirred_dev,
					  struct mlx5e_encap_entry *e)
{
	int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
	int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
	struct ip_tunnel_key *tun_key = &e->tun_info.key;
	struct net_device *out_dev;
	struct neighbour *n = NULL;
	struct flowi6 fl6 = {};
	char *encap_header;
	int ttl, err;
	u8 nud_state;

	if (max_encap_size < ipv6_encap_size) {
		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
			       ipv6_encap_size, max_encap_size);
		return -EOPNOTSUPP;
	}

	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
	if (!encap_header)
		return -ENOMEM;

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		fl6.flowi6_proto = IPPROTO_UDP;
		fl6.fl6_dport = tun_key->tp_dst;
		break;
	default:
		err = -EOPNOTSUPP;
		goto free_encap;
	}

	fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
	fl6.daddr = tun_key->u.ipv6.dst;
	fl6.saddr = tun_key->u.ipv6.src;

	err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
				      &fl6, &n, &ttl);
	if (err)
		goto free_encap;

	/* used by mlx5e_detach_encap to look up this entry in the neigh
	 * hash table when a user deletes a rule
	 */
	e->m_neigh.dev = n->dev;
	e->m_neigh.family = n->ops->family;
	memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
	e->out_dev = out_dev;

	/* It's important to add the neigh to the hash table before checking
	 * its validity state. That way, if we get a notification that the
	 * neigh changed its validity state, we can find the relevant neigh
	 * in the hash.
	 */
	err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
	if (err)
		goto free_encap;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(e->h_dest, n->ha);
	read_unlock_bh(&n->lock);

	switch (e->tunnel_type) {
	case MLX5_HEADER_TYPE_VXLAN:
		gen_vxlan_header_ipv6(out_dev, encap_header,
				      ipv6_encap_size, e->h_dest, ttl,
				      &fl6.daddr,
				      &fl6.saddr, tun_key->tp_dst,
				      tunnel_id_to_key32(tun_key->tun_id));
		break;
	default:
		err = -EOPNOTSUPP;
		goto destroy_neigh_entry;
	}

	e->encap_size = ipv6_encap_size;
	e->encap_header = encap_header;

	if (!(nud_state & NUD_VALID)) {
		neigh_event_send(n, NULL);
		err = -EAGAIN;
		goto out;
	}

	err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
			       ipv6_encap_size, encap_header, &e->encap_id);
	if (err)
		goto destroy_neigh_entry;

	e->flags |= MLX5_ENCAP_ENTRY_VALID;
	mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
	neigh_release(n);
	return err;

destroy_neigh_entry:
	mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
free_encap:
	kfree(encap_header);
out:
	if (n)
		neigh_release(n);
	return err;
}
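/* Find or create the encap entry shared by all flows with this tunnel
 * key and link the flow to it. Returns -EAGAIN when the entry exists
 * but its neighbour is still unresolved; such flows are offloaded
 * later, when the encap entry becomes valid.
 */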
static int mlx5e_attach_encap(struct mlx5e_priv *priv,
			      struct ip_tunnel_info *tun_info,
			      struct net_device *mirred_dev,
			      struct net_device **encap_dev,
			      struct mlx5e_tc_flow *flow)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
									   REP_ETH);
	struct net_device *up_dev = uplink_rpriv->netdev;
	unsigned short family = ip_tunnel_info_af(tun_info);
	struct mlx5e_priv *up_priv = netdev_priv(up_dev);
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct ip_tunnel_key *key = &tun_info->key;
	struct mlx5e_encap_entry *e;
	int tunnel_type, err = 0;
	uintptr_t hash_key;
	bool found = false;

	/* udp dst port must be set */
	if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
		goto vxlan_encap_offload_err;

	/* setting udp src port isn't supported */
	if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
vxlan_encap_offload_err:
		netdev_warn(priv->netdev,
			    "must set udp dst port and not set udp src port\n");
		return -EOPNOTSUPP;
	}

	if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
	    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
		tunnel_type = MLX5_HEADER_TYPE_VXLAN;
	} else {
		netdev_warn(priv->netdev,
			    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
		return -EOPNOTSUPP;
	}

	hash_key = hash_encap_info(key);

	hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
				   encap_hlist, hash_key) {
		if (!cmp_encap_info(&e->tun_info.key, key)) {
			found = true;
			break;
		}
	}

	/* must verify if encap is valid or not */
	if (found)
		goto attach_flow;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (!e)
		return -ENOMEM;

	e->tun_info = *tun_info;
	e->tunnel_type = tunnel_type;
	INIT_LIST_HEAD(&e->flows);

	if (family == AF_INET)
		err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
	else if (family == AF_INET6)
		err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);

	if (err && err != -EAGAIN)
		goto out_err;

	hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);

attach_flow:
	list_add(&flow->encap, &e->flows);
	*encap_dev = e->out_dev;
	if (e->flags & MLX5_ENCAP_ENTRY_VALID)
		attr->encap_id = e->encap_id;
	else
		err = -EAGAIN;

	return err;

out_err:
	kfree(e);
	return err;
}
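/* Map a tc vlan action to eswitch vlan pop/push bits. Pushing a prio or
 * a non-802.1Q protocol requires the extended eswitch vlan actions;
 * TCA_VLAN_ACT_MODIFY is never offloaded.
 */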
static int parse_tc_vlan_action(struct mlx5e_priv *priv,
				const struct tc_action *a,
				struct mlx5_esw_flow_attr *attr,
				u32 *action)
{
	if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
	} else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		*action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
		attr->vlan_vid[0] = tcf_vlan_push_vid(a);
		if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
			attr->vlan_prio[0] = tcf_vlan_push_prio(a);
			attr->vlan_proto[0] = tcf_vlan_push_proto(a);
			if (!attr->vlan_proto[0])
				attr->vlan_proto[0] = htons(ETH_P_8021Q);
		} else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
			   tcf_vlan_push_prio(a)) {
			return -EOPNOTSUPP;
		}
	} else { /* action is TCA_VLAN_ACT_MODIFY */
		return -EOPNOTSUPP;
	}

	return 0;
}
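/* Translate the tc action list of an eswitch (FDB) flow: drop, pedit,
 * csum, mirred redirect/mirror to representors on the same eswitch or
 * to a tunnel device (encap), vlan push/pop and tunnel decap.
 * mirror_count records how many outputs were seen before a packet-
 * modifying action, so the rule can later be split for mirroring.
 */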
static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
				struct mlx5e_tc_flow_parse_attr *parse_attr,
				struct mlx5e_tc_flow *flow)
{
	struct mlx5_esw_flow_attr *attr = flow->esw_attr;
	struct mlx5e_rep_priv *rpriv = priv->ppriv;
	struct ip_tunnel_info *info = NULL;
	const struct tc_action *a;
	LIST_HEAD(actions);
	bool encap = false;
	u32 action = 0;
	int err;

	if (!tcf_exts_has_actions(exts))
		return -EINVAL;

	attr->in_rep = rpriv->rep;
	attr->in_mdev = priv->mdev;

	tcf_exts_to_list(exts, &actions);
	list_for_each_entry(a, &actions, list) {
		if (is_tcf_gact_shot(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
			continue;
		}

		if (is_tcf_pedit(a)) {
			err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
						    parse_attr);
			if (err)
				return err;

			action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_csum(a)) {
			if (csum_offload_supported(priv, action,
						   tcf_csum_update_flags(a)))
				continue;

			return -EOPNOTSUPP;
		}

		if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
			struct mlx5e_priv *out_priv;
			struct net_device *out_dev;

			out_dev = tcf_mirred_dev(a);

			if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
				pr_err("can't support more than %d output ports, can't offload forwarding\n",
				       attr->out_count);
				return -EOPNOTSUPP;
			}

			if (switchdev_port_same_parent_id(priv->netdev,
							  out_dev) ||
			    is_merged_eswitch_dev(priv, out_dev)) {
				action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
				out_priv = netdev_priv(out_dev);
				rpriv = out_priv->ppriv;
				attr->out_rep[attr->out_count] = rpriv->rep;
				attr->out_mdev[attr->out_count++] = out_priv->mdev;
			} else if (encap) {
				parse_attr->mirred_ifindex = out_dev->ifindex;
				parse_attr->tun_info = *info;
				attr->parse_attr = parse_attr;
				action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
					  MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
					  MLX5_FLOW_CONTEXT_ACTION_COUNT;
				/* attr->out_rep is resolved when we handle encap */
			} else {
				pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
				       priv->netdev->name, out_dev->name);
				return -EINVAL;
			}
			continue;
		}

		if (is_tcf_tunnel_set(a)) {
			info = tcf_tunnel_info(a);
			if (info)
				encap = true;
			else
				return -EOPNOTSUPP;
			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_vlan(a)) {
			err = parse_tc_vlan_action(priv, a, attr, &action);
			if (err)
				return err;

			attr->mirror_count = attr->out_count;
			continue;
		}

		if (is_tcf_tunnel_release(a)) {
			action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
			continue;
		}

		return -EINVAL;
	}

	attr->action = action;
	if (!actions_match_supported(priv, exts, parse_attr, flow))
		return -EOPNOTSUPP;

	if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
		netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
		return -EOPNOTSUPP;
	}

	return 0;
}
static void get_flags(int flags, u8 *flow_flags)
{
	u8 __flow_flags = 0;

	if (flags & MLX5E_TC_INGRESS)
		__flow_flags |= MLX5E_TC_FLOW_INGRESS;
	if (flags & MLX5E_TC_EGRESS)
		__flow_flags |= MLX5E_TC_FLOW_EGRESS;

	*flow_flags = __flow_flags;
}
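/* Offloaded flows are kept in an rhashtable keyed by the cls_flower
 * cookie. In switchdev mode the table is shared across representors and
 * lives on the uplink rep; otherwise each netdev uses priv->fs.tc.ht.
 */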
static const struct rhashtable_params tc_ht_params = {
	.head_offset = offsetof(struct mlx5e_tc_flow, node),
	.key_offset = offsetof(struct mlx5e_tc_flow, cookie),
	.key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
	.automatic_shrinking = true,
};
static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_rep_priv *uplink_rpriv;

	if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
		uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
		return &uplink_rpriv->tc_ht;
	}

	return &priv->fs.tc.ht;
}
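/* Add a new flower rule: parse the match into a flow spec, parse the
 * actions into NIC or FDB attributes, install the rule in hardware and
 * insert the flow into the hash table. -EAGAIN from the add path is not
 * a failure; it marks a flow parked until neighbour resolution.
 */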
int mlx5e_configure_flower(struct mlx5e_priv *priv,
			   struct tc_cls_flower_offload *f, int flags)
{
	struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
	struct mlx5e_tc_flow_parse_attr *parse_attr;
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;
	int attr_size, err = 0;
	u8 flow_flags = 0;

	get_flags(flags, &flow_flags);

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (flow) {
		netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
		return 0;
	}

	if (esw && esw->mode == SRIOV_OFFLOADS) {
		flow_flags |= MLX5E_TC_FLOW_ESWITCH;
		attr_size = sizeof(struct mlx5_esw_flow_attr);
	} else {
		flow_flags |= MLX5E_TC_FLOW_NIC;
		attr_size = sizeof(struct mlx5_nic_flow_attr);
	}

	flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
	parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
	if (!parse_attr || !flow) {
		err = -ENOMEM;
		goto err_free;
	}

	flow->cookie = f->cookie;
	flow->flags = flow_flags;
	flow->priv = priv;

	err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
	if (err < 0)
		goto err_free;

	if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
		err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
	} else {
		err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
		if (err < 0)
			goto err_free;
		flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
	}

	if (IS_ERR(flow->rule[0])) {
		err = PTR_ERR(flow->rule[0]);
		if (err != -EAGAIN)
			goto err_free;
	}

	if (err != -EAGAIN)
		flow->flags |= MLX5E_TC_FLOW_OFFLOADED;

	if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
	    !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
		kvfree(parse_attr);

	err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
	if (err) {
		mlx5e_tc_del_flow(priv, flow);
		kfree(flow);
	}

	return err;

err_free:
	kvfree(parse_attr);
	kfree(flow);
	return err;
}
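/* A cookie may be reported for both directions on a shared block; only
 * serve delete/stats requests whose direction flags match the direction
 * the flow was added with.
 */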
#define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
#define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)

static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
{
	if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
		return true;

	return false;
}
int mlx5e_delete_flower(struct mlx5e_priv *priv,
			struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);

	return 0;
}
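/* Report the cached hardware counter (bytes, packets, last use) of an
 * offloaded flow back to the tc layer.
 */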
int mlx5e_stats_flower(struct mlx5e_priv *priv,
		       struct tc_cls_flower_offload *f, int flags)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);
	struct mlx5e_tc_flow *flow;
	struct mlx5_fc *counter;
	u64 bytes, packets, lastuse;

	flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
	if (!flow || !same_flow_direction(flow, flags))
		return -EINVAL;

	if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
		return 0;

	counter = mlx5_flow_rule_counter(flow->rule[0]);
	if (!counter)
		return 0;

	mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);

	tcf_exts_stats_update(f->exts, bytes, packets, lastuse);

	return 0;
}
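/* Setup/teardown of the NIC and eswitch flow tables. Teardown walks the
 * rhashtable and deletes any flows userspace never removed.
 */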
int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	hash_init(tc->mod_hdr_tbl);
	hash_init(tc->hairpin_tbl);

	return rhashtable_init(&tc->ht, &tc_ht_params);
}
static void _mlx5e_tc_del_flow(void *ptr, void *arg)
{
	struct mlx5e_tc_flow *flow = ptr;
	struct mlx5e_priv *priv = flow->priv;

	mlx5e_tc_del_flow(priv, flow);
	kfree(flow);
}
void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
{
	struct mlx5e_tc_table *tc = &priv->fs.tc;

	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

	if (!IS_ERR_OR_NULL(tc->t)) {
		mlx5_destroy_flow_table(tc->t);
		tc->t = NULL;
	}
}
int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
{
	return rhashtable_init(tc_ht, &tc_ht_params);
}

void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
{
	rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);
}
int mlx5e_tc_num_filters(struct mlx5e_priv *priv)
{
	struct rhashtable *tc_ht = get_tc_ht(priv);

	return atomic_read(&tc_ht->nelems);
}