/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
33 #include <net/flow_dissector.h>
34 #include <net/sch_generic.h>
35 #include <net/pkt_cls.h>
36 #include <net/tc_act/tc_gact.h>
37 #include <net/tc_act/tc_skbedit.h>
38 #include <linux/mlx5/fs.h>
39 #include <linux/mlx5/device.h>
40 #include <linux/rhashtable.h>
41 #include <net/switchdev.h>
42 #include <net/tc_act/tc_mirred.h>
43 #include <net/tc_act/tc_vlan.h>
44 #include <net/tc_act/tc_tunnel_key.h>
45 #include <net/tc_act/tc_pedit.h>
46 #include <net/tc_act/tc_csum.h>
47 #include <net/vxlan.h>
57 struct mlx5_nic_flow_attr {
63 struct mlx5_flow_table *hairpin_ft;
66 #define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
69 MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS,
70 MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS,
71 MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE),
72 MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1),
73 MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
74 MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3),
75 MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
78 struct mlx5e_tc_flow {
79 struct rhash_head node;
80 struct mlx5e_priv *priv;
83 struct mlx5_flow_handle *rule;
84 struct list_head encap; /* flows sharing the same encap ID */
85 struct list_head mod_hdr; /* flows sharing the same mod hdr ID */
86 struct list_head hairpin; /* flows sharing the same hairpin */
88 struct mlx5_esw_flow_attr esw_attr[0];
89 struct mlx5_nic_flow_attr nic_attr[0];
93 struct mlx5e_tc_flow_parse_attr {
94 struct ip_tunnel_info tun_info;
95 struct mlx5_flow_spec spec;
96 int num_mod_hdr_actions;
97 void *mod_hdr_actions;
102 MLX5_HEADER_TYPE_VXLAN = 0x0,
103 MLX5_HEADER_TYPE_NVGRE = 0x1,
106 #define MLX5E_TC_TABLE_NUM_GROUPS 4
107 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
109 struct mlx5e_hairpin {
110 struct mlx5_hairpin *pair;
112 struct mlx5_core_dev *func_mdev;
113 struct mlx5e_priv *func_priv;
118 struct mlx5e_rqt indir_rqt;
119 u32 indir_tirn[MLX5E_NUM_INDIR_TIRS];
120 struct mlx5e_ttc_table ttc;
123 struct mlx5e_hairpin_entry {
124 /* a node of a hash table which keeps all the hairpin entries */
125 struct hlist_node hairpin_hlist;
127 /* flows sharing the same hairpin */
128 struct list_head flows;
132 struct mlx5e_hairpin *hp;
140 struct mlx5e_mod_hdr_entry {
141 /* a node of a hash table which keeps all the mod_hdr entries */
142 struct hlist_node mod_hdr_hlist;
144 /* flows sharing the same mod_hdr entry */
145 struct list_head flows;
147 struct mod_hdr_key key;
152 #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto)
154 static inline u32 hash_mod_hdr_info(struct mod_hdr_key *key)
156 return jhash(key->actions,
157 key->num_actions * MLX5_MH_ACT_SZ, 0);
160 static inline int cmp_mod_hdr_info(struct mod_hdr_key *a,
161 struct mod_hdr_key *b)
163 if (a->num_actions != b->num_actions)
166 return memcmp(a->actions, b->actions, a->num_actions * MLX5_MH_ACT_SZ);
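/* Modify-header (pedit) contexts are de-duplicated: the raw array of HW
 * actions is hashed and looked up in a per-namespace table (FDB for eswitch
 * flows, kernel NIC RX otherwise), so flows carrying an identical set of
 * rewrite actions share a single mod_hdr_id allocated in firmware.
 */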
169 static int mlx5e_attach_mod_hdr(struct mlx5e_priv *priv,
170 struct mlx5e_tc_flow *flow,
171 struct mlx5e_tc_flow_parse_attr *parse_attr)
173 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
174 int num_actions, actions_size, namespace, err;
175 struct mlx5e_mod_hdr_entry *mh;
176 struct mod_hdr_key key;
180 num_actions = parse_attr->num_mod_hdr_actions;
181 actions_size = MLX5_MH_ACT_SZ * num_actions;
183 key.actions = parse_attr->mod_hdr_actions;
184 key.num_actions = num_actions;
186 hash_key = hash_mod_hdr_info(&key);
188 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
189 namespace = MLX5_FLOW_NAMESPACE_FDB;
190 hash_for_each_possible(esw->offloads.mod_hdr_tbl, mh,
191 mod_hdr_hlist, hash_key) {
192 if (!cmp_mod_hdr_info(&mh->key, &key)) {
198 namespace = MLX5_FLOW_NAMESPACE_KERNEL;
199 hash_for_each_possible(priv->fs.tc.mod_hdr_tbl, mh,
200 mod_hdr_hlist, hash_key) {
201 if (!cmp_mod_hdr_info(&mh->key, &key)) {
211 mh = kzalloc(sizeof(*mh) + actions_size, GFP_KERNEL);
215 mh->key.actions = (void *)mh + sizeof(*mh);
216 memcpy(mh->key.actions, key.actions, actions_size);
217 mh->key.num_actions = num_actions;
218 INIT_LIST_HEAD(&mh->flows);
220 err = mlx5_modify_header_alloc(priv->mdev, namespace,
227 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
228 hash_add(esw->offloads.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
230 hash_add(priv->fs.tc.mod_hdr_tbl, &mh->mod_hdr_hlist, hash_key);
233 list_add(&flow->mod_hdr, &mh->flows);
234 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
235 flow->esw_attr->mod_hdr_id = mh->mod_hdr_id;
237 flow->nic_attr->mod_hdr_id = mh->mod_hdr_id;
246 static void mlx5e_detach_mod_hdr(struct mlx5e_priv *priv,
247 struct mlx5e_tc_flow *flow)
249 struct list_head *next = flow->mod_hdr.next;
251 list_del(&flow->mod_hdr);
253 if (list_empty(next)) {
254 struct mlx5e_mod_hdr_entry *mh;
256 mh = list_entry(next, struct mlx5e_mod_hdr_entry, flows);
258 mlx5_modify_header_dealloc(priv->mdev, mh->mod_hdr_id);
259 hash_del(&mh->mod_hdr_hlist);
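/* Hairpin offload forwards packets NIC-internally between two functions of
 * the same HW device; resolve the mlx5 core device behind the mirred egress
 * ifindex so the pair of send/receive queues can be created on it.
 */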
265 struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex)
267 struct net_device *netdev;
268 struct mlx5e_priv *priv;
270 netdev = __dev_get_by_index(net, ifindex);
271 priv = netdev_priv(netdev);
275 static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp)
277 u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {0};
281 err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn);
285 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
287 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
288 MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]);
289 MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
291 err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->tirn);
298 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
303 static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
305 mlx5_core_destroy_tir(hp->func_mdev, hp->tirn);
306 mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
309 static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
311 u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
312 struct mlx5e_priv *priv = hp->func_priv;
313 int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
315 mlx5e_build_default_indir_rqt(indirection_rqt, sz,
	for (i = 0; i < sz; i++) {
		ix = i;
		if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR)
			ix = mlx5e_bits_invert(i, ilog2(sz));
		ix = indirection_rqt[ix];
		rqn = hp->pair->rqn[ix];
		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
328 static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
330 int inlen, err, sz = MLX5E_INDIR_RQT_SIZE;
331 struct mlx5e_priv *priv = hp->func_priv;
332 struct mlx5_core_dev *mdev = priv->mdev;
336 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
337 in = kvzalloc(inlen, GFP_KERNEL);
341 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
343 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
344 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
346 mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
348 err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
350 hp->indir_rqt.enabled = true;
356 static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp)
358 struct mlx5e_priv *priv = hp->func_priv;
359 u32 in[MLX5_ST_SZ_DW(create_tir_in)];
363 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
364 memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in));
365 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
367 MLX5_SET(tirc, tirc, transport_domain, hp->tdn);
368 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
369 MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn);
370 mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
372 err = mlx5_core_create_tir(hp->func_mdev, in,
373 MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]);
375 mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err);
376 goto err_destroy_tirs;
382 for (i = 0; i < tt; i++)
383 mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]);
387 static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp)
391 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
392 mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]);
395 static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp,
396 struct ttc_params *ttc_params)
398 struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr;
401 memset(ttc_params, 0, sizeof(*ttc_params));
403 ttc_params->any_tt_tirn = hp->tirn;
405 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++)
406 ttc_params->indir_tirn[tt] = hp->indir_tirn[tt];
408 ft_attr->max_fte = MLX5E_NUM_TT;
409 ft_attr->level = MLX5E_TC_TTC_FT_LEVEL;
410 ft_attr->prio = MLX5E_TC_PRIO;
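/* When the hairpin pair has more than one channel, RSS is set up over it:
 * an indirection RQT spreads traffic across the pair's RQs, per-traffic-type
 * TIRs hash into that RQT, and a dedicated TTC table classifies the
 * hairpinned packets into those TIRs.
 */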
413 static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp)
415 struct mlx5e_priv *priv = hp->func_priv;
416 struct ttc_params ttc_params;
419 err = mlx5e_hairpin_create_indirect_rqt(hp);
423 err = mlx5e_hairpin_create_indirect_tirs(hp);
425 goto err_create_indirect_tirs;
427 mlx5e_hairpin_set_ttc_params(hp, &ttc_params);
428 err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc);
430 goto err_create_ttc_table;
432 netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n",
433 hp->num_channels, hp->ttc.ft.t->id);
437 err_create_ttc_table:
438 mlx5e_hairpin_destroy_indirect_tirs(hp);
439 err_create_indirect_tirs:
440 mlx5e_destroy_rqt(priv, &hp->indir_rqt);
445 static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp)
447 struct mlx5e_priv *priv = hp->func_priv;
449 mlx5e_destroy_ttc_table(priv, &hp->ttc);
450 mlx5e_hairpin_destroy_indirect_tirs(hp);
451 mlx5e_destroy_rqt(priv, &hp->indir_rqt);
454 static struct mlx5e_hairpin *
455 mlx5e_hairpin_create(struct mlx5e_priv *priv, struct mlx5_hairpin_params *params,
458 struct mlx5_core_dev *func_mdev, *peer_mdev;
459 struct mlx5e_hairpin *hp;
460 struct mlx5_hairpin *pair;
463 hp = kzalloc(sizeof(*hp), GFP_KERNEL);
465 return ERR_PTR(-ENOMEM);
467 func_mdev = priv->mdev;
468 peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
470 pair = mlx5_core_hairpin_create(func_mdev, peer_mdev, params);
473 goto create_pair_err;
476 hp->func_mdev = func_mdev;
477 hp->func_priv = priv;
478 hp->num_channels = params->num_channels;
480 err = mlx5e_hairpin_create_transport(hp);
482 goto create_transport_err;
484 if (hp->num_channels > 1) {
485 err = mlx5e_hairpin_rss_init(hp);
493 mlx5e_hairpin_destroy_transport(hp);
494 create_transport_err:
495 mlx5_core_hairpin_destroy(hp->pair);
501 static void mlx5e_hairpin_destroy(struct mlx5e_hairpin *hp)
503 if (hp->num_channels > 1)
504 mlx5e_hairpin_rss_cleanup(hp);
505 mlx5e_hairpin_destroy_transport(hp);
506 mlx5_core_hairpin_destroy(hp->pair);
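/* Hairpin pairs are shared between flows: entries are keyed by the peer's
 * vhca_id and the matched VLAN priority, so all TC rules redirecting to the
 * same peer device at the same priority reuse one hairpin instance.
 */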
510 static inline u32 hash_hairpin_info(u16 peer_vhca_id, u8 prio)
512 return (peer_vhca_id << 16 | prio);
515 static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
516 u16 peer_vhca_id, u8 prio)
518 struct mlx5e_hairpin_entry *hpe;
519 u32 hash_key = hash_hairpin_info(peer_vhca_id, prio);
521 hash_for_each_possible(priv->fs.tc.hairpin_tbl, hpe,
522 hairpin_hlist, hash_key) {
523 if (hpe->peer_vhca_id == peer_vhca_id && hpe->prio == prio)
530 #define UNKNOWN_MATCH_PRIO 8
532 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
533 struct mlx5_flow_spec *spec, u8 *match_prio)
535 void *headers_c, *headers_v;
536 u8 prio_val, prio_mask = 0;
539 #ifdef CONFIG_MLX5_CORE_EN_DCB
540 if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
541 netdev_warn(priv->netdev,
542 "only PCP trust state supported for hairpin\n");
546 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers);
547 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
549 vlan_present = MLX5_GET(fte_match_set_lyr_2_4, headers_v, cvlan_tag);
551 prio_mask = MLX5_GET(fte_match_set_lyr_2_4, headers_c, first_prio);
552 prio_val = MLX5_GET(fte_match_set_lyr_2_4, headers_v, first_prio);
555 if (!vlan_present || !prio_mask) {
556 prio_val = UNKNOWN_MATCH_PRIO;
557 } else if (prio_mask != 0x7) {
558 netdev_warn(priv->netdev,
559 "masked priority match not supported for hairpin\n");
563 *match_prio = prio_val;
567 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
568 struct mlx5e_tc_flow *flow,
569 struct mlx5e_tc_flow_parse_attr *parse_attr)
571 int peer_ifindex = parse_attr->mirred_ifindex;
572 struct mlx5_hairpin_params params;
573 struct mlx5_core_dev *peer_mdev;
574 struct mlx5e_hairpin_entry *hpe;
575 struct mlx5e_hairpin *hp;
582 peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
583 if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
584 netdev_warn(priv->netdev, "hairpin is not supported\n");
588 peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
589 err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
592 hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
596 hpe = kzalloc(sizeof(*hpe), GFP_KERNEL);
600 INIT_LIST_HEAD(&hpe->flows);
601 hpe->peer_vhca_id = peer_id;
602 hpe->prio = match_prio;
604 params.log_data_size = 15;
605 params.log_data_size = min_t(u8, params.log_data_size,
606 MLX5_CAP_GEN(priv->mdev, log_max_hairpin_wq_data_sz));
607 params.log_data_size = max_t(u8, params.log_data_size,
608 MLX5_CAP_GEN(priv->mdev, log_min_hairpin_wq_data_sz));
610 params.log_num_packets = params.log_data_size -
611 MLX5_MPWRQ_MIN_LOG_STRIDE_SZ(priv->mdev);
612 params.log_num_packets = min_t(u8, params.log_num_packets,
613 MLX5_CAP_GEN(priv->mdev, log_max_hairpin_num_packets));
615 params.q_counter = priv->q_counter;
	/* size the hairpin pair: one channel per 50Gbps share of the link speed */
617 mlx5e_port_max_linkspeed(priv->mdev, &link_speed);
618 link_speed = max_t(u32, link_speed, 50000);
619 link_speed64 = link_speed;
620 do_div(link_speed64, 50000);
621 params.num_channels = link_speed64;
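	/* e.g. a 100Gbps link yields two hairpin channels, while links at or
	 * below 50Gbps are clamped to a single channel
	 */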
	hp = mlx5e_hairpin_create(priv, &params, peer_ifindex);
626 goto create_hairpin_err;
629 netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n",
630 hp->tirn, hp->pair->rqn[0], hp->pair->peer_mdev->priv.name,
631 hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets);
634 hash_add(priv->fs.tc.hairpin_tbl, &hpe->hairpin_hlist,
635 hash_hairpin_info(peer_id, match_prio));
638 if (hpe->hp->num_channels > 1) {
639 flow->flags |= MLX5E_TC_FLOW_HAIRPIN_RSS;
640 flow->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t;
642 flow->nic_attr->hairpin_tirn = hpe->hp->tirn;
644 list_add(&flow->hairpin, &hpe->flows);
653 static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
654 struct mlx5e_tc_flow *flow)
656 struct list_head *next = flow->hairpin.next;
658 list_del(&flow->hairpin);
660 /* no more hairpin flows for us, release the hairpin pair */
661 if (list_empty(next)) {
662 struct mlx5e_hairpin_entry *hpe;
664 hpe = list_entry(next, struct mlx5e_hairpin_entry, flows);
666 netdev_dbg(priv->netdev, "del hairpin: peer %s\n",
667 hpe->hp->pair->peer_mdev->priv.name);
669 mlx5e_hairpin_destroy(hpe->hp);
670 hash_del(&hpe->hairpin_hlist);
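/* Add a NIC (non-eswitch) offloaded flow: the rule lands in the per-device
 * TC flow table, with destinations chosen from the hairpin TTC table or TIR
 * (for hairpin flows), the VLAN table (regular forwarding) and an optional
 * flow counter.
 */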
675 static struct mlx5_flow_handle *
676 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
677 struct mlx5e_tc_flow_parse_attr *parse_attr,
678 struct mlx5e_tc_flow *flow)
680 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
681 struct mlx5_core_dev *dev = priv->mdev;
682 struct mlx5_flow_destination dest[2] = {};
683 struct mlx5_flow_act flow_act = {
684 .action = attr->action,
685 .has_flow_tag = true,
686 .flow_tag = attr->flow_tag,
689 struct mlx5_fc *counter = NULL;
690 struct mlx5_flow_handle *rule;
691 bool table_created = false;
692 int err, dest_ix = 0;
694 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
695 err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
698 goto err_add_hairpin_flow;
700 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
701 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
702 dest[dest_ix].ft = attr->hairpin_ft;
704 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_TIR;
705 dest[dest_ix].tir_num = attr->hairpin_tirn;
708 } else if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
709 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
710 dest[dest_ix].ft = priv->fs.vlan.ft.t;
714 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
715 counter = mlx5_fc_create(dev, true);
716 if (IS_ERR(counter)) {
717 rule = ERR_CAST(counter);
720 dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
721 dest[dest_ix].counter = counter;
725 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
726 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
727 flow_act.modify_id = attr->mod_hdr_id;
728 kfree(parse_attr->mod_hdr_actions);
731 goto err_create_mod_hdr_id;
735 if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
736 int tc_grp_size, tc_tbl_size;
737 u32 max_flow_counter;
739 max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
740 MLX5_CAP_GEN(dev, max_flow_counter_15_0);
742 tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
744 tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
745 BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
748 mlx5_create_auto_grouped_flow_table(priv->fs.ns,
751 MLX5E_TC_TABLE_NUM_GROUPS,
752 MLX5E_TC_FT_LEVEL, 0);
753 if (IS_ERR(priv->fs.tc.t)) {
754 netdev_err(priv->netdev,
755 "Failed to create tc offload table\n");
756 rule = ERR_CAST(priv->fs.tc.t);
760 table_created = true;
763 if (attr->match_level != MLX5_MATCH_NONE)
764 parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
766 rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
767 &flow_act, dest, dest_ix);
776 mlx5_destroy_flow_table(priv->fs.tc.t);
777 priv->fs.tc.t = NULL;
780 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
781 mlx5e_detach_mod_hdr(priv, flow);
782 err_create_mod_hdr_id:
783 mlx5_fc_destroy(dev, counter);
785 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
786 mlx5e_hairpin_flow_del(priv, flow);
787 err_add_hairpin_flow:
791 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
792 struct mlx5e_tc_flow *flow)
794 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
795 struct mlx5_fc *counter = NULL;
797 counter = mlx5_flow_rule_counter(flow->rule);
798 mlx5_del_flow_rules(flow->rule);
799 mlx5_fc_destroy(priv->mdev, counter);
801 if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) {
802 mlx5_destroy_flow_table(priv->fs.tc.t);
803 priv->fs.tc.t = NULL;
806 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
807 mlx5e_detach_mod_hdr(priv, flow);
809 if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
810 mlx5e_hairpin_flow_del(priv, flow);
813 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
814 struct mlx5e_tc_flow *flow);
816 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
817 struct ip_tunnel_info *tun_info,
818 struct net_device *mirred_dev,
819 struct net_device **encap_dev,
820 struct mlx5e_tc_flow *flow);
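/* Add an eswitch (FDB) offloaded flow. Ordering matters: encap state is
 * attached first (it may return -EAGAIN while the tunnel neighbour is still
 * unresolved), then the VLAN action and modify-header context, and only then
 * is the rule pushed to the FDB; an -EAGAIN flow stays cached on the encap
 * entry until the neighbour update re-adds it.
 */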
822 static struct mlx5_flow_handle *
823 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
824 struct mlx5e_tc_flow_parse_attr *parse_attr,
825 struct mlx5e_tc_flow *flow)
827 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
828 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
829 struct net_device *out_dev, *encap_dev = NULL;
830 struct mlx5_flow_handle *rule = NULL;
831 struct mlx5e_rep_priv *rpriv;
832 struct mlx5e_priv *out_priv;
835 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
836 out_dev = __dev_get_by_index(dev_net(priv->netdev),
837 attr->parse_attr->mirred_ifindex);
838 err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
839 out_dev, &encap_dev, flow);
843 goto err_attach_encap;
845 out_priv = netdev_priv(encap_dev);
846 rpriv = out_priv->ppriv;
847 attr->out_rep[attr->out_count] = rpriv->rep;
848 attr->out_mdev[attr->out_count++] = out_priv->mdev;
851 err = mlx5_eswitch_add_vlan_action(esw, attr);
857 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
858 err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
859 kfree(parse_attr->mod_hdr_actions);
866 /* we get here if (1) there's no error (rule being null) or when
867 * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
869 if (rule != ERR_PTR(-EAGAIN)) {
870 rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
877 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
878 mlx5e_detach_mod_hdr(priv, flow);
880 mlx5_eswitch_del_vlan_action(esw, attr);
882 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
883 mlx5e_detach_encap(priv, flow);
888 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
889 struct mlx5e_tc_flow *flow)
891 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
892 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
894 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
895 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
896 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, attr);
899 mlx5_eswitch_del_vlan_action(esw, attr);
901 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
902 mlx5e_detach_encap(priv, flow);
903 kvfree(attr->parse_attr);
906 if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
907 mlx5e_detach_mod_hdr(priv, flow);
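/* Called from the representor neigh update path once the tunnel neighbour
 * becomes valid: allocate the cached encapsulation header in HW, mark the
 * entry valid and (re-)offload every flow waiting on this encap entry.
 */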
910 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
911 struct mlx5e_encap_entry *e)
913 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
914 struct mlx5_esw_flow_attr *esw_attr;
915 struct mlx5e_tc_flow *flow;
918 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
919 e->encap_size, e->encap_header,
922 mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
926 e->flags |= MLX5_ENCAP_ENTRY_VALID;
927 mlx5e_rep_queue_neigh_stats_work(priv);
929 list_for_each_entry(flow, &e->flows, encap) {
930 esw_attr = flow->esw_attr;
931 esw_attr->encap_id = e->encap_id;
932 flow->rule = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
933 if (IS_ERR(flow->rule)) {
934 err = PTR_ERR(flow->rule);
935 mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
939 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
943 void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
944 struct mlx5e_encap_entry *e)
946 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
947 struct mlx5e_tc_flow *flow;
949 list_for_each_entry(flow, &e->flows, encap) {
950 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
951 flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
952 mlx5_eswitch_del_offloaded_rule(esw, flow->rule, flow->esw_attr);
956 if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
957 e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
958 mlx5_encap_dealloc(priv->mdev, e->encap_id);
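/* Periodic neigh-stats work: if any offloaded encap flow using this
 * neighbour has passed traffic since the last check, poke the neighbour
 * (neigh_event_send) so it is not garbage collected while HW keeps
 * encapsulating with its MAC address.
 */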
962 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
964 struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
965 u64 bytes, packets, lastuse = 0;
966 struct mlx5e_tc_flow *flow;
967 struct mlx5e_encap_entry *e;
968 struct mlx5_fc *counter;
969 struct neigh_table *tbl;
970 bool neigh_used = false;
973 if (m_neigh->family == AF_INET)
975 #if IS_ENABLED(CONFIG_IPV6)
976 else if (m_neigh->family == AF_INET6)
982 list_for_each_entry(e, &nhe->encap_list, encap_list) {
983 if (!(e->flags & MLX5_ENCAP_ENTRY_VALID))
985 list_for_each_entry(flow, &e->flows, encap) {
986 if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
987 counter = mlx5_flow_rule_counter(flow->rule);
988 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
989 if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
1000 nhe->reported_lastuse = jiffies;
	/* find the relevant neigh according to the cached device and
	 * dst ip pair
	 */
	n = neigh_lookup(tbl, &m_neigh->dst_ip, m_neigh->dev);
	if (!n) {
		WARN(1, "The neighbour was already freed\n");
		return;
	}

	neigh_event_send(n, NULL);
	neigh_release(n);
1016 static void mlx5e_detach_encap(struct mlx5e_priv *priv,
1017 struct mlx5e_tc_flow *flow)
1019 struct list_head *next = flow->encap.next;
1021 list_del(&flow->encap);
1022 if (list_empty(next)) {
1023 struct mlx5e_encap_entry *e;
1025 e = list_entry(next, struct mlx5e_encap_entry, flows);
1026 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
1028 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
1029 mlx5_encap_dealloc(priv->mdev, e->encap_id);
1031 hash_del_rcu(&e->encap_hlist);
1032 kfree(e->encap_header);
1037 static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
1038 struct mlx5e_tc_flow *flow)
1040 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1041 mlx5e_tc_del_fdb_flow(priv, flow);
1043 mlx5e_tc_del_nic_flow(priv, flow);
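/* Tunnel match parsing below supports VXLAN only: the filter must match on
 * the full UDP destination port, and that port has to be one of the VXLAN
 * ports configured for offload on the uplink.
 */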
1046 static void parse_vxlan_attr(struct mlx5_flow_spec *spec,
1047 struct tc_cls_flower_offload *f)
1049 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1051 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1053 void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1055 void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1058 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol);
1059 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
1061 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
1062 struct flow_dissector_key_keyid *key =
1063 skb_flow_dissector_target(f->dissector,
1064 FLOW_DISSECTOR_KEY_ENC_KEYID,
1066 struct flow_dissector_key_keyid *mask =
1067 skb_flow_dissector_target(f->dissector,
1068 FLOW_DISSECTOR_KEY_ENC_KEYID,
1070 MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni,
1071 be32_to_cpu(mask->keyid));
1072 MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
1073 be32_to_cpu(key->keyid));
1077 static int parse_tunnel_attr(struct mlx5e_priv *priv,
1078 struct mlx5_flow_spec *spec,
1079 struct tc_cls_flower_offload *f)
1081 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1083 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1086 struct flow_dissector_key_control *enc_control =
1087 skb_flow_dissector_target(f->dissector,
1088 FLOW_DISSECTOR_KEY_ENC_CONTROL,
1091 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
1092 struct flow_dissector_key_ports *key =
1093 skb_flow_dissector_target(f->dissector,
1094 FLOW_DISSECTOR_KEY_ENC_PORTS,
1096 struct flow_dissector_key_ports *mask =
1097 skb_flow_dissector_target(f->dissector,
1098 FLOW_DISSECTOR_KEY_ENC_PORTS,
1100 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
1101 struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
1102 struct net_device *up_dev = uplink_rpriv->netdev;
1103 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
1105 /* Full udp dst port must be given */
1106 if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
1107 goto vxlan_match_offload_err;
1109 if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->dst)) &&
1110 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
1111 parse_vxlan_attr(spec, f);
1113 netdev_warn(priv->netdev,
1114 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
1118 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1119 udp_dport, ntohs(mask->dst));
1120 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1121 udp_dport, ntohs(key->dst));
1123 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1124 udp_sport, ntohs(mask->src));
1125 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1126 udp_sport, ntohs(key->src));
1127 } else { /* udp dst port must be given */
1128 vxlan_match_offload_err:
1129 netdev_warn(priv->netdev,
1130 "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
1134 if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1135 struct flow_dissector_key_ipv4_addrs *key =
1136 skb_flow_dissector_target(f->dissector,
1137 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1139 struct flow_dissector_key_ipv4_addrs *mask =
1140 skb_flow_dissector_target(f->dissector,
1141 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
1143 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1144 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1146 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1147 src_ipv4_src_ipv6.ipv4_layout.ipv4,
1150 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1151 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1153 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1154 dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
1157 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1158 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
1159 } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1160 struct flow_dissector_key_ipv6_addrs *key =
1161 skb_flow_dissector_target(f->dissector,
1162 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1164 struct flow_dissector_key_ipv6_addrs *mask =
1165 skb_flow_dissector_target(f->dissector,
1166 FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
1169 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1170 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1171 &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1172 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1173 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1174 &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1176 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1177 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1178 &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1179 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1180 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1181 &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
1183 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
1184 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
1187 /* Enforce DMAC when offloading incoming tunneled flows.
1188 * Flow counters require a match on the DMAC.
1190 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_47_16);
1191 MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, dmac_15_0);
1192 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1193 dmac_47_16), priv->netdev->dev_addr);
1195 /* let software handle IP fragments */
1196 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1197 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag, 0);
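/* __parse_cls_flower() translates the flower dissector keys into an mlx5
 * match spec and reports the deepest header layer that was matched
 * (match_level: none/L2/L3/L4), which is later checked against the eswitch
 * minimum inline mode for eswitch flows.
 */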
1202 static int __parse_cls_flower(struct mlx5e_priv *priv,
1203 struct mlx5_flow_spec *spec,
1204 struct tc_cls_flower_offload *f,
1207 void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1209 void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1214 *match_level = MLX5_MATCH_NONE;
1216 if (f->dissector->used_keys &
1217 ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
1218 BIT(FLOW_DISSECTOR_KEY_BASIC) |
1219 BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
1220 BIT(FLOW_DISSECTOR_KEY_VLAN) |
1221 BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
1222 BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
1223 BIT(FLOW_DISSECTOR_KEY_PORTS) |
1224 BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
1225 BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) |
1226 BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) |
1227 BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) |
1228 BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) |
1229 BIT(FLOW_DISSECTOR_KEY_TCP) |
1230 BIT(FLOW_DISSECTOR_KEY_IP))) {
1231 netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
1232 f->dissector->used_keys);
1236 if ((dissector_uses_key(f->dissector,
1237 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) ||
1238 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID) ||
1239 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) &&
1240 dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
1241 struct flow_dissector_key_control *key =
1242 skb_flow_dissector_target(f->dissector,
1243 FLOW_DISSECTOR_KEY_ENC_CONTROL,
1245 switch (key->addr_type) {
1246 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
1247 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
1248 if (parse_tunnel_attr(priv, spec, f))
	/* In decap flows, header pointers should point to the inner
	 * headers; the outer headers were already set by parse_tunnel_attr.
	 */
1258 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
1260 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
1264 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
1265 struct flow_dissector_key_eth_addrs *key =
1266 skb_flow_dissector_target(f->dissector,
1267 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1269 struct flow_dissector_key_eth_addrs *mask =
1270 skb_flow_dissector_target(f->dissector,
1271 FLOW_DISSECTOR_KEY_ETH_ADDRS,
1274 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1277 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1281 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1284 ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1288 if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
1289 *match_level = MLX5_MATCH_L2;
1292 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
1293 struct flow_dissector_key_vlan *key =
1294 skb_flow_dissector_target(f->dissector,
1295 FLOW_DISSECTOR_KEY_VLAN,
1297 struct flow_dissector_key_vlan *mask =
1298 skb_flow_dissector_target(f->dissector,
1299 FLOW_DISSECTOR_KEY_VLAN,
1301 if (mask->vlan_id || mask->vlan_priority) {
1302 MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
1303 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
1305 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
1306 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
1308 MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_prio, mask->vlan_priority);
1309 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, key->vlan_priority);
1311 *match_level = MLX5_MATCH_L2;
1315 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1316 struct flow_dissector_key_basic *key =
1317 skb_flow_dissector_target(f->dissector,
1318 FLOW_DISSECTOR_KEY_BASIC,
1320 struct flow_dissector_key_basic *mask =
1321 skb_flow_dissector_target(f->dissector,
1322 FLOW_DISSECTOR_KEY_BASIC,
1324 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
1325 ntohs(mask->n_proto));
1326 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
1327 ntohs(key->n_proto));
1330 *match_level = MLX5_MATCH_L2;
1333 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
1334 struct flow_dissector_key_control *key =
1335 skb_flow_dissector_target(f->dissector,
1336 FLOW_DISSECTOR_KEY_CONTROL,
1339 struct flow_dissector_key_control *mask =
1340 skb_flow_dissector_target(f->dissector,
1341 FLOW_DISSECTOR_KEY_CONTROL,
1343 addr_type = key->addr_type;
1345 /* the HW doesn't support frag first/later */
1346 if (mask->flags & FLOW_DIS_FIRST_FRAG)
1349 if (mask->flags & FLOW_DIS_IS_FRAGMENT) {
1350 MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
1351 MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
1352 key->flags & FLOW_DIS_IS_FRAGMENT);
1354 /* the HW doesn't need L3 inline to match on frag=no */
1355 if (!(key->flags & FLOW_DIS_IS_FRAGMENT))
1356 *match_level = MLX5_INLINE_MODE_L2;
1357 /* *** L2 attributes parsing up to here *** */
1359 *match_level = MLX5_INLINE_MODE_IP;
1363 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
1364 struct flow_dissector_key_basic *key =
1365 skb_flow_dissector_target(f->dissector,
1366 FLOW_DISSECTOR_KEY_BASIC,
1368 struct flow_dissector_key_basic *mask =
1369 skb_flow_dissector_target(f->dissector,
1370 FLOW_DISSECTOR_KEY_BASIC,
1372 ip_proto = key->ip_proto;
1374 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_protocol,
1376 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1380 *match_level = MLX5_MATCH_L3;
1383 if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
1384 struct flow_dissector_key_ipv4_addrs *key =
1385 skb_flow_dissector_target(f->dissector,
1386 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1388 struct flow_dissector_key_ipv4_addrs *mask =
1389 skb_flow_dissector_target(f->dissector,
1390 FLOW_DISSECTOR_KEY_IPV4_ADDRS,
1393 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1394 src_ipv4_src_ipv6.ipv4_layout.ipv4),
1395 &mask->src, sizeof(mask->src));
1396 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1397 src_ipv4_src_ipv6.ipv4_layout.ipv4),
1398 &key->src, sizeof(key->src));
1399 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1400 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1401 &mask->dst, sizeof(mask->dst));
1402 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1403 dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
1404 &key->dst, sizeof(key->dst));
1406 if (mask->src || mask->dst)
1407 *match_level = MLX5_MATCH_L3;
1410 if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
1411 struct flow_dissector_key_ipv6_addrs *key =
1412 skb_flow_dissector_target(f->dissector,
1413 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1415 struct flow_dissector_key_ipv6_addrs *mask =
1416 skb_flow_dissector_target(f->dissector,
1417 FLOW_DISSECTOR_KEY_IPV6_ADDRS,
1420 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1421 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1422 &mask->src, sizeof(mask->src));
1423 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1424 src_ipv4_src_ipv6.ipv6_layout.ipv6),
1425 &key->src, sizeof(key->src));
1427 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
1428 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1429 &mask->dst, sizeof(mask->dst));
1430 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1431 dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
1432 &key->dst, sizeof(key->dst));
1434 if (ipv6_addr_type(&mask->src) != IPV6_ADDR_ANY ||
1435 ipv6_addr_type(&mask->dst) != IPV6_ADDR_ANY)
1436 *match_level = MLX5_MATCH_L3;
1439 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_IP)) {
1440 struct flow_dissector_key_ip *key =
1441 skb_flow_dissector_target(f->dissector,
1442 FLOW_DISSECTOR_KEY_IP,
1444 struct flow_dissector_key_ip *mask =
1445 skb_flow_dissector_target(f->dissector,
1446 FLOW_DISSECTOR_KEY_IP,
1449 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_ecn, mask->tos & 0x3);
1450 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, key->tos & 0x3);
1452 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ip_dscp, mask->tos >> 2);
1453 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, key->tos >> 2);
1455 MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
1456 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
1459 !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
1460 ft_field_support.outer_ipv4_ttl))
1463 if (mask->tos || mask->ttl)
1464 *match_level = MLX5_MATCH_L3;
1467 /* *** L3 attributes parsing up to here *** */
1469 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_PORTS)) {
1470 struct flow_dissector_key_ports *key =
1471 skb_flow_dissector_target(f->dissector,
1472 FLOW_DISSECTOR_KEY_PORTS,
1474 struct flow_dissector_key_ports *mask =
1475 skb_flow_dissector_target(f->dissector,
1476 FLOW_DISSECTOR_KEY_PORTS,
1480 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1481 tcp_sport, ntohs(mask->src));
1482 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1483 tcp_sport, ntohs(key->src));
1485 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1486 tcp_dport, ntohs(mask->dst));
1487 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1488 tcp_dport, ntohs(key->dst));
1492 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1493 udp_sport, ntohs(mask->src));
1494 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1495 udp_sport, ntohs(key->src));
1497 MLX5_SET(fte_match_set_lyr_2_4, headers_c,
1498 udp_dport, ntohs(mask->dst));
1499 MLX5_SET(fte_match_set_lyr_2_4, headers_v,
1500 udp_dport, ntohs(key->dst));
1503 netdev_err(priv->netdev,
1504 "Only UDP and TCP transport are supported\n");
1508 if (mask->src || mask->dst)
1509 *match_level = MLX5_MATCH_L4;
1512 if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_TCP)) {
1513 struct flow_dissector_key_tcp *key =
1514 skb_flow_dissector_target(f->dissector,
1515 FLOW_DISSECTOR_KEY_TCP,
1517 struct flow_dissector_key_tcp *mask =
1518 skb_flow_dissector_target(f->dissector,
1519 FLOW_DISSECTOR_KEY_TCP,
1522 MLX5_SET(fte_match_set_lyr_2_4, headers_c, tcp_flags,
1523 ntohs(mask->flags));
1524 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
1528 *match_level = MLX5_MATCH_L4;
1534 static int parse_cls_flower(struct mlx5e_priv *priv,
1535 struct mlx5e_tc_flow *flow,
1536 struct mlx5_flow_spec *spec,
1537 struct tc_cls_flower_offload *f)
1539 struct mlx5_core_dev *dev = priv->mdev;
1540 struct mlx5_eswitch *esw = dev->priv.eswitch;
1541 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1542 struct mlx5_eswitch_rep *rep;
1546 err = __parse_cls_flower(priv, spec, f, &match_level);
1548 if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) {
1550 if (rep->vport != FDB_UPLINK_VPORT &&
1551 (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
1552 esw->offloads.inline_mode < match_level)) {
1553 netdev_warn(priv->netdev,
1554 "Flow is not offloaded due to min inline setting, required %d actual %d\n",
1555 match_level, esw->offloads.inline_mode);
1560 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1561 flow->esw_attr->match_level = match_level;
1563 flow->nic_attr->match_level = match_level;
1568 struct pedit_headers {
1576 static int pedit_header_offsets[] = {
1577 [TCA_PEDIT_KEY_EX_HDR_TYPE_ETH] = offsetof(struct pedit_headers, eth),
1578 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP4] = offsetof(struct pedit_headers, ip4),
1579 [TCA_PEDIT_KEY_EX_HDR_TYPE_IP6] = offsetof(struct pedit_headers, ip6),
1580 [TCA_PEDIT_KEY_EX_HDR_TYPE_TCP] = offsetof(struct pedit_headers, tcp),
1581 [TCA_PEDIT_KEY_EX_HDR_TYPE_UDP] = offsetof(struct pedit_headers, udp),
1584 #define pedit_header(_ph, _htype) ((void *)(_ph) + pedit_header_offsets[_htype])
1586 static int set_pedit_val(u8 hdr_type, u32 mask, u32 val, u32 offset,
1587 struct pedit_headers *masks,
1588 struct pedit_headers *vals)
1590 u32 *curr_pmask, *curr_pval;
1592 if (hdr_type >= __PEDIT_HDR_TYPE_MAX)
1595 curr_pmask = (u32 *)(pedit_header(masks, hdr_type) + offset);
1596 curr_pval = (u32 *)(pedit_header(vals, hdr_type) + offset);
1598 if (*curr_pmask & mask) /* disallow acting twice on the same location */
1601 *curr_pmask |= mask;
1602 *curr_pval |= (val & mask);
1610 struct mlx5_fields {
1616 #define OFFLOAD(fw_field, size, field, off) \
1617 {MLX5_ACTION_IN_FIELD_OUT_ ## fw_field, size, offsetof(struct pedit_headers, field) + (off)}
1619 static struct mlx5_fields fields[] = {
1620 OFFLOAD(DMAC_47_16, 4, eth.h_dest[0], 0),
1621 OFFLOAD(DMAC_15_0, 2, eth.h_dest[4], 0),
1622 OFFLOAD(SMAC_47_16, 4, eth.h_source[0], 0),
1623 OFFLOAD(SMAC_15_0, 2, eth.h_source[4], 0),
1624 OFFLOAD(ETHERTYPE, 2, eth.h_proto, 0),
1626 OFFLOAD(IP_TTL, 1, ip4.ttl, 0),
1627 OFFLOAD(SIPV4, 4, ip4.saddr, 0),
1628 OFFLOAD(DIPV4, 4, ip4.daddr, 0),
1630 OFFLOAD(SIPV6_127_96, 4, ip6.saddr.s6_addr32[0], 0),
1631 OFFLOAD(SIPV6_95_64, 4, ip6.saddr.s6_addr32[1], 0),
1632 OFFLOAD(SIPV6_63_32, 4, ip6.saddr.s6_addr32[2], 0),
1633 OFFLOAD(SIPV6_31_0, 4, ip6.saddr.s6_addr32[3], 0),
1634 OFFLOAD(DIPV6_127_96, 4, ip6.daddr.s6_addr32[0], 0),
1635 OFFLOAD(DIPV6_95_64, 4, ip6.daddr.s6_addr32[1], 0),
1636 OFFLOAD(DIPV6_63_32, 4, ip6.daddr.s6_addr32[2], 0),
1637 OFFLOAD(DIPV6_31_0, 4, ip6.daddr.s6_addr32[3], 0),
1638 OFFLOAD(IPV6_HOPLIMIT, 1, ip6.hop_limit, 0),
1640 OFFLOAD(TCP_SPORT, 2, tcp.source, 0),
1641 OFFLOAD(TCP_DPORT, 2, tcp.dest, 0),
1642 OFFLOAD(TCP_FLAGS, 1, tcp.ack_seq, 5),
1644 OFFLOAD(UDP_SPORT, 2, udp.source, 0),
1645 OFFLOAD(UDP_DPORT, 2, udp.dest, 0),
/* On input attr->num_mod_hdr_actions tells how many HW actions can be parsed
 * at most from the SW pedit action. On success, it says how many HW actions
 * were actually parsed.
 */
1652 static int offload_pedit_fields(struct pedit_headers *masks,
1653 struct pedit_headers *vals,
1654 struct mlx5e_tc_flow_parse_attr *parse_attr)
1656 struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
1657 int i, action_size, nactions, max_actions, first, last, next_z;
1658 void *s_masks_p, *a_masks_p, *vals_p;
1659 struct mlx5_fields *f;
1660 u8 cmd, field_bsize;
1667 set_masks = &masks[TCA_PEDIT_KEY_EX_CMD_SET];
1668 add_masks = &masks[TCA_PEDIT_KEY_EX_CMD_ADD];
1669 set_vals = &vals[TCA_PEDIT_KEY_EX_CMD_SET];
1670 add_vals = &vals[TCA_PEDIT_KEY_EX_CMD_ADD];
1672 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1673 action = parse_attr->mod_hdr_actions;
1674 max_actions = parse_attr->num_mod_hdr_actions;
1677 for (i = 0; i < ARRAY_SIZE(fields); i++) {
1679 /* avoid seeing bits set from previous iterations */
1683 s_masks_p = (void *)set_masks + f->offset;
1684 a_masks_p = (void *)add_masks + f->offset;
1686 memcpy(&s_mask, s_masks_p, f->size);
1687 memcpy(&a_mask, a_masks_p, f->size);
1689 if (!s_mask && !a_mask) /* nothing to offload here */
1692 if (s_mask && a_mask) {
1693 printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
1697 if (nactions == max_actions) {
1698 printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
1703 cmd = MLX5_ACTION_TYPE_SET;
1705 vals_p = (void *)set_vals + f->offset;
1706 /* clear to denote we consumed this field */
1707 memset(s_masks_p, 0, f->size);
1709 cmd = MLX5_ACTION_TYPE_ADD;
1711 vals_p = (void *)add_vals + f->offset;
1712 /* clear to denote we consumed this field */
1713 memset(a_masks_p, 0, f->size);
1716 field_bsize = f->size * BITS_PER_BYTE;
1718 if (field_bsize == 32) {
1719 mask_be32 = *(__be32 *)&mask;
1720 mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
1721 } else if (field_bsize == 16) {
1722 mask_be16 = *(__be16 *)&mask;
1723 mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
1726 first = find_first_bit(&mask, field_bsize);
1727 next_z = find_next_zero_bit(&mask, field_bsize, first);
1728 last = find_last_bit(&mask, field_bsize);
1729 if (first < next_z && next_z < last) {
1730 printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
1735 MLX5_SET(set_action_in, action, action_type, cmd);
1736 MLX5_SET(set_action_in, action, field, f->field);
1738 if (cmd == MLX5_ACTION_TYPE_SET) {
1739 MLX5_SET(set_action_in, action, offset, first);
1740 /* length is num of bits to be written, zero means length of 32 */
1741 MLX5_SET(set_action_in, action, length, (last - first + 1));
1744 if (field_bsize == 32)
1745 MLX5_SET(set_action_in, action, data, ntohl(*(__be32 *)vals_p) >> first);
1746 else if (field_bsize == 16)
1747 MLX5_SET(set_action_in, action, data, ntohs(*(__be16 *)vals_p) >> first);
1748 else if (field_bsize == 8)
1749 MLX5_SET(set_action_in, action, data, *(u8 *)vals_p >> first);
1751 action += action_size;
1755 parse_attr->num_mod_hdr_actions = nactions;
1759 static int alloc_mod_hdr_actions(struct mlx5e_priv *priv,
1760 const struct tc_action *a, int namespace,
1761 struct mlx5e_tc_flow_parse_attr *parse_attr)
1763 int nkeys, action_size, max_actions;
1765 nkeys = tcf_pedit_nkeys(a);
1766 action_size = MLX5_UN_SZ_BYTES(set_action_in_add_action_in_auto);
1768 if (namespace == MLX5_FLOW_NAMESPACE_FDB) /* FDB offloading */
1769 max_actions = MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, max_modify_header_actions);
1770 else /* namespace is MLX5_FLOW_NAMESPACE_KERNEL - NIC offloading */
1771 max_actions = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, max_modify_header_actions);
	/* a single 32-bit pedit SW key can expand to at most 16 HW actions */
1774 max_actions = min(max_actions, nkeys * 16);
1776 parse_attr->mod_hdr_actions = kcalloc(max_actions, action_size, GFP_KERNEL);
1777 if (!parse_attr->mod_hdr_actions)
1780 parse_attr->num_mod_hdr_actions = max_actions;
1784 static const struct pedit_headers zero_masks = {};
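/* Pedit parsing happens in two passes: first every SW pedit key is folded
 * into per-command (set/add) mask+value header templates, then
 * offload_pedit_fields() walks the known offloadable fields and emits one HW
 * modify-header action per consumed field; any mask bits left over mean an
 * unsupported field and the whole rule is rejected.
 */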
1786 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
1787 const struct tc_action *a, int namespace,
1788 struct mlx5e_tc_flow_parse_attr *parse_attr)
1790 struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
1791 int nkeys, i, err = -EOPNOTSUPP;
1792 u32 mask, val, offset;
1795 nkeys = tcf_pedit_nkeys(a);
1797 memset(masks, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1798 memset(vals, 0, sizeof(struct pedit_headers) * __PEDIT_CMD_MAX);
1800 for (i = 0; i < nkeys; i++) {
1801 htype = tcf_pedit_htype(a, i);
1802 cmd = tcf_pedit_cmd(a, i);
1803 err = -EOPNOTSUPP; /* can't be all optimistic */
1805 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
1806 netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
1810 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
1811 netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
1815 mask = tcf_pedit_mask(a, i);
1816 val = tcf_pedit_val(a, i);
1817 offset = tcf_pedit_offset(a, i);
1819 err = set_pedit_val(htype, ~mask, val, offset, &masks[cmd], &vals[cmd]);
1824 err = alloc_mod_hdr_actions(priv, a, namespace, parse_attr);
1828 err = offload_pedit_fields(masks, vals, parse_attr);
1830 goto out_dealloc_parsed_actions;
1832 for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
1833 cmd_masks = &masks[cmd];
1834 if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
1835 netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
1836 print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
1837 16, 1, cmd_masks, sizeof(zero_masks), true);
1839 goto out_dealloc_parsed_actions;
1845 out_dealloc_parsed_actions:
1846 kfree(parse_attr->mod_hdr_actions);
1851 static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
1853 u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
1854 TCA_CSUM_UPDATE_FLAG_UDP;
1856 /* The HW recalcs checksums only if re-writing headers */
1857 if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
1858 netdev_warn(priv->netdev,
1859 "TC csum action is only offloaded with pedit\n");
1863 if (update_flags & ~prot_flags) {
1864 netdev_warn(priv->netdev,
1865 "can't offload TC csum action for some header/s - flags %#x\n",
1873 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
1874 struct tcf_exts *exts)
1876 const struct tc_action *a;
1877 bool modify_ip_header;
1884 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers);
1885 ethertype = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ethertype);
1887 /* for non-IP we only re-write MACs, so we're okay */
1888 if (ethertype != ETH_P_IP && ethertype != ETH_P_IPV6)
1891 modify_ip_header = false;
1892 tcf_exts_to_list(exts, &actions);
1893 list_for_each_entry(a, &actions, list) {
1894 if (!is_tcf_pedit(a))
1897 nkeys = tcf_pedit_nkeys(a);
1898 for (i = 0; i < nkeys; i++) {
1899 htype = tcf_pedit_htype(a, i);
1900 if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP4 ||
1901 htype == TCA_PEDIT_KEY_EX_HDR_TYPE_IP6) {
1902 modify_ip_header = true;
1908 ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
1909 if (modify_ip_header && ip_proto != IPPROTO_TCP &&
1910 ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
1911 pr_info("can't offload re-write of ip proto %d\n", ip_proto);
1919 static bool actions_match_supported(struct mlx5e_priv *priv,
1920 struct tcf_exts *exts,
1921 struct mlx5e_tc_flow_parse_attr *parse_attr,
1922 struct mlx5e_tc_flow *flow)
1926 if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
1927 actions = flow->esw_attr->action;
1929 actions = flow->nic_attr->action;
1931 if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
1932 return modify_header_match_supported(&parse_attr->spec, exts);
1937 static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
1939 struct mlx5_core_dev *fmdev, *pmdev;
1940 u16 func_id, peer_id;
1943 pmdev = peer_priv->mdev;
1945 func_id = (u16)((fmdev->pdev->bus->number << 8) | PCI_SLOT(fmdev->pdev->devfn));
1946 peer_id = (u16)((pmdev->pdev->bus->number << 8) | PCI_SLOT(pmdev->pdev->devfn));
1948 return (func_id == peer_id);
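/* NIC action parsing handles drop, pedit/csum, skbedit mark and mirred
 * redirect to another port of the same HW device (hairpin), e.g. a rule of
 * this shape (interface names are illustrative only):
 *
 *   tc filter add dev enp8s0f0 protocol ip parent ffff: \
 *       flower ip_proto tcp action mirred egress redirect dev enp8s0f1
 */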
1951 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
1952 struct mlx5e_tc_flow_parse_attr *parse_attr,
1953 struct mlx5e_tc_flow *flow)
1955 struct mlx5_nic_flow_attr *attr = flow->nic_attr;
1956 const struct tc_action *a;
1961 if (!tcf_exts_has_actions(exts))
1964 attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
1966 tcf_exts_to_list(exts, &actions);
1967 list_for_each_entry(a, &actions, list) {
1968 if (is_tcf_gact_shot(a)) {
1969 action |= MLX5_FLOW_CONTEXT_ACTION_DROP;
1970 if (MLX5_CAP_FLOWTABLE(priv->mdev,
1971 flow_table_properties_nic_receive.flow_counter))
1972 action |= MLX5_FLOW_CONTEXT_ACTION_COUNT;
1976 if (is_tcf_pedit(a)) {
1977 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
1982 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR |
1983 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
1987 if (is_tcf_csum(a)) {
1988 if (csum_offload_supported(priv, action,
1989 tcf_csum_update_flags(a)))
1995 if (is_tcf_mirred_egress_redirect(a)) {
1996 struct net_device *peer_dev = tcf_mirred_dev(a);
1998 if (priv->netdev->netdev_ops == peer_dev->netdev_ops &&
1999 same_hw_devs(priv, netdev_priv(peer_dev))) {
2000 parse_attr->mirred_ifindex = peer_dev->ifindex;
2001 flow->flags |= MLX5E_TC_FLOW_HAIRPIN;
2002 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2003 MLX5_FLOW_CONTEXT_ACTION_COUNT;
2005 netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
2012 if (is_tcf_skbedit_mark(a)) {
2013 u32 mark = tcf_skbedit_mark(a);
2015 if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
2016 netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
2021 attr->flow_tag = mark;
2022 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
2029 attr->action = action;
2030 if (!actions_match_supported(priv, exts, parse_attr, flow))
2036 static inline int cmp_encap_info(struct ip_tunnel_key *a,
2037 struct ip_tunnel_key *b)
2039 return memcmp(a, b, sizeof(*a));
2042 static inline int hash_encap_info(struct ip_tunnel_key *key)
2044 return jhash(key, sizeof(*key), 0);
2047 static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
2048 struct net_device *mirred_dev,
2049 struct net_device **out_dev,
2051 struct neighbour **out_n,
2054 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2055 struct mlx5e_rep_priv *uplink_rpriv;
2057 struct neighbour *n = NULL;
2059 #if IS_ENABLED(CONFIG_INET)
2062 rt = ip_route_output_key(dev_net(mirred_dev), fl4);
2063 ret = PTR_ERR_OR_ZERO(rt);
2069 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2070 /* if the egress device isn't on the same HW e-switch, we use the uplink */
2071 if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev))
2072 *out_dev = uplink_rpriv->netdev;
2074 *out_dev = rt->dst.dev;
2076 *out_ttl = ip4_dst_hoplimit(&rt->dst);
2077 n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
2086 static bool is_merged_eswitch_dev(struct mlx5e_priv *priv,
2087 struct net_device *peer_netdev)
2089 struct mlx5e_priv *peer_priv;
2091 peer_priv = netdev_priv(peer_netdev);
2093 return (MLX5_CAP_ESW(priv->mdev, merged_eswitch) &&
2094 (priv->netdev->netdev_ops == peer_netdev->netdev_ops) &&
2095 same_hw_devs(priv, peer_priv) &&
2096 MLX5_VPORT_MANAGER(peer_priv->mdev) &&
2097 (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS));
2100 static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
2101 struct net_device *mirred_dev,
2102 struct net_device **out_dev,
2104 struct neighbour **out_n,
2107 struct neighbour *n = NULL;
2108 struct dst_entry *dst;
2110 #if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
2111 struct mlx5e_rep_priv *uplink_rpriv;
2112 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2115 ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst,
2120 *out_ttl = ip6_dst_hoplimit(dst);
2122 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2123 /* if the egress device isn't on the same HW e-switch, we use the uplink */
2124 if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
2125 *out_dev = uplink_rpriv->netdev;
2127 *out_dev = dst->dev;
2132 n = dst_neigh_lookup(dst, &fl6->daddr);
2141 static void gen_vxlan_header_ipv4(struct net_device *out_dev,
2142 char buf[], int encap_size,
2143 unsigned char h_dest[ETH_ALEN],
2147 __be16 udp_dst_port,
2150 struct ethhdr *eth = (struct ethhdr *)buf;
2151 struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr));
2152 struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr));
2153 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
2155 memset(buf, 0, encap_size);
2157 ether_addr_copy(eth->h_dest, h_dest);
2158 ether_addr_copy(eth->h_source, out_dev->dev_addr);
2159 eth->h_proto = htons(ETH_P_IP);
2165 ip->protocol = IPPROTO_UDP;
2169 udp->dest = udp_dst_port;
2170 vxh->vx_flags = VXLAN_HF_VNI;
2171 vxh->vx_vni = vxlan_vni_field(vx_vni);
2174 static void gen_vxlan_header_ipv6(struct net_device *out_dev,
2175 char buf[], int encap_size,
2176 unsigned char h_dest[ETH_ALEN],
2178 struct in6_addr *daddr,
2179 struct in6_addr *saddr,
2180 __be16 udp_dst_port,
2183 struct ethhdr *eth = (struct ethhdr *)buf;
2184 struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
2185 struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
2186 struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
2188 memset(buf, 0, encap_size);
2190 ether_addr_copy(eth->h_dest, h_dest);
2191 ether_addr_copy(eth->h_source, out_dev->dev_addr);
2192 eth->h_proto = htons(ETH_P_IPV6);
2194 ip6_flow_hdr(ip6h, 0, 0);
2195 /* the HW fills in the IPv6 payload length */
2196 ip6h->nexthdr = IPPROTO_UDP;
2197 ip6h->hop_limit = ttl;
2198 ip6h->daddr = *daddr;
2199 ip6h->saddr = *saddr;
2201 udp->dest = udp_dst_port;
2202 vxh->vx_flags = VXLAN_HF_VNI;
2203 vxh->vx_vni = vxlan_vni_field(vx_vni);
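/* Create an IPv4 VXLAN encap header for entry @e: check the size against
 * the device cap, resolve route and neighbour, attach the entry to the
 * representor neigh update machinery, build the header and, once the
 * neighbour is valid, allocate the encap id in FW and mark the entry
 * MLX5_ENCAP_ENTRY_VALID. If the neighbour is not valid yet, a neigh
 * event is kicked so the offload can be completed later on.
 */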
2206 static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
2207 struct net_device *mirred_dev,
2208 struct mlx5e_encap_entry *e)
2210 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
2211 int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN;
2212 struct ip_tunnel_key *tun_key = &e->tun_info.key;
2213 struct net_device *out_dev;
2214 struct neighbour *n = NULL;
2215 struct flowi4 fl4 = {};
2220 if (max_encap_size < ipv4_encap_size) {
2221 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
2222 ipv4_encap_size, max_encap_size);
2226 encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
2230 switch (e->tunnel_type) {
2231 case MLX5_HEADER_TYPE_VXLAN:
2232 fl4.flowi4_proto = IPPROTO_UDP;
2233 fl4.fl4_dport = tun_key->tp_dst;
2239 fl4.flowi4_tos = tun_key->tos;
2240 fl4.daddr = tun_key->u.ipv4.dst;
2241 fl4.saddr = tun_key->u.ipv4.src;
2243 err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev,
2248 /* used by mlx5e_detach_encap to look up this neigh entry
2249  * in the neigh hash table when a user deletes a rule
2251 e->m_neigh.dev = n->dev;
2252 e->m_neigh.family = n->ops->family;
2253 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2254 e->out_dev = out_dev;
2256 /* It's important to add the neigh to the hash table before checking
2257  * its validity state. This way, if we get a notification that the
2258  * neigh changed its validity state, we will find the relevant neigh
2261 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2265 read_lock_bh(&n->lock);
2266 nud_state = n->nud_state;
2267 ether_addr_copy(e->h_dest, n->ha);
2268 read_unlock_bh(&n->lock);
2270 switch (e->tunnel_type) {
2271 case MLX5_HEADER_TYPE_VXLAN:
2272 gen_vxlan_header_ipv4(out_dev, encap_header,
2273 ipv4_encap_size, e->h_dest, ttl,
2275 fl4.saddr, tun_key->tp_dst,
2276 tunnel_id_to_key32(tun_key->tun_id));
2280 goto destroy_neigh_entry;
2282 e->encap_size = ipv4_encap_size;
2283 e->encap_header = encap_header;
2285 if (!(nud_state & NUD_VALID)) {
2286 neigh_event_send(n, NULL);
2291 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
2292 ipv4_encap_size, encap_header, &e->encap_id);
2294 goto destroy_neigh_entry;
2296 e->flags |= MLX5_ENCAP_ENTRY_VALID;
2297 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
2301 destroy_neigh_entry:
2302 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
2304 kfree(encap_header);
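/* IPv6 counterpart of mlx5e_create_encap_header_ipv4(); the flow is the
 * same apart from the route lookup and the generated header.
 */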
2311 static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
2312 struct net_device *mirred_dev,
2313 struct mlx5e_encap_entry *e)
2315 int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
2316 int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN;
2317 struct ip_tunnel_key *tun_key = &e->tun_info.key;
2318 struct net_device *out_dev;
2319 struct neighbour *n = NULL;
2320 struct flowi6 fl6 = {};
2325 if (max_encap_size < ipv6_encap_size) {
2326 mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
2327 ipv6_encap_size, max_encap_size);
2331 encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
2335 switch (e->tunnel_type) {
2336 case MLX5_HEADER_TYPE_VXLAN:
2337 fl6.flowi6_proto = IPPROTO_UDP;
2338 fl6.fl6_dport = tun_key->tp_dst;
2345 fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
2346 fl6.daddr = tun_key->u.ipv6.dst;
2347 fl6.saddr = tun_key->u.ipv6.src;
2349 err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev,
2354 /* used by mlx5e_detach_encap to look up this neigh entry
2355  * in the neigh hash table when a user deletes a rule
2357 e->m_neigh.dev = n->dev;
2358 e->m_neigh.family = n->ops->family;
2359 memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len);
2360 e->out_dev = out_dev;
2362 /* It's important to add the neigh to the hash table before checking
2363  * its validity state. This way, if we get a notification that the
2364  * neigh changed its validity state, we will find the relevant neigh
2367 err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e);
2371 read_lock_bh(&n->lock);
2372 nud_state = n->nud_state;
2373 ether_addr_copy(e->h_dest, n->ha);
2374 read_unlock_bh(&n->lock);
2376 switch (e->tunnel_type) {
2377 case MLX5_HEADER_TYPE_VXLAN:
2378 gen_vxlan_header_ipv6(out_dev, encap_header,
2379 ipv6_encap_size, e->h_dest, ttl,
2381 &fl6.saddr, tun_key->tp_dst,
2382 tunnel_id_to_key32(tun_key->tun_id));
2386 goto destroy_neigh_entry;
2389 e->encap_size = ipv6_encap_size;
2390 e->encap_header = encap_header;
2392 if (!(nud_state & NUD_VALID)) {
2393 neigh_event_send(n, NULL);
2398 err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
2399 ipv6_encap_size, encap_header, &e->encap_id);
2401 goto destroy_neigh_entry;
2403 e->flags |= MLX5_ENCAP_ENTRY_VALID;
2404 mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev));
2408 destroy_neigh_entry:
2409 mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
2411 kfree(encap_header);
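/* Attach @flow to a (possibly shared) encap entry. Only VXLAN tunnels
 * towards an offloaded UDP destination port are supported, and the UDP
 * source port must be left unspecified. Entries live in the e-switch
 * encap hash table keyed by the tunnel key, so flows using the same
 * tunnel info share one encap id; the id of an already valid entry is
 * copied into the flow attr right away.
 */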
2418 static int mlx5e_attach_encap(struct mlx5e_priv *priv,
2419 struct ip_tunnel_info *tun_info,
2420 struct net_device *mirred_dev,
2421 struct net_device **encap_dev,
2422 struct mlx5e_tc_flow *flow)
2424 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2425 struct mlx5e_rep_priv *uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw,
2427 struct net_device *up_dev = uplink_rpriv->netdev;
2428 unsigned short family = ip_tunnel_info_af(tun_info);
2429 struct mlx5e_priv *up_priv = netdev_priv(up_dev);
2430 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2431 struct ip_tunnel_key *key = &tun_info->key;
2432 struct mlx5e_encap_entry *e;
2433 int tunnel_type, err = 0;
2437 /* udp dst port must be set */
2438 if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
2439 goto vxlan_encap_offload_err;
2441 /* setting udp src port isn't supported */
2442 if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
2443 vxlan_encap_offload_err:
2444 netdev_warn(priv->netdev,
2445 "must set udp dst port and not set udp src port\n");
2449 if (mlx5e_vxlan_lookup_port(up_priv, be16_to_cpu(key->tp_dst)) &&
2450 MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
2451 tunnel_type = MLX5_HEADER_TYPE_VXLAN;
2453 netdev_warn(priv->netdev,
2454 "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
2458 hash_key = hash_encap_info(key);
2460 hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
2461 encap_hlist, hash_key) {
2462 if (!cmp_encap_info(&e->tun_info.key, key)) {
2468 /* must verify whether the encap entry is valid */
2472 e = kzalloc(sizeof(*e), GFP_KERNEL);
2476 e->tun_info = *tun_info;
2477 e->tunnel_type = tunnel_type;
2478 INIT_LIST_HEAD(&e->flows);
2480 if (family == AF_INET)
2481 err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e);
2482 else if (family == AF_INET6)
2483 err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e);
2485 if (err && err != -EAGAIN)
2488 hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key);
2491 list_add(&flow->encap, &e->flows);
2492 *encap_dev = e->out_dev;
2493 if (e->flags & MLX5_ENCAP_ENTRY_VALID)
2494 attr->encap_id = e->encap_id;
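/* Translate the TC actions of an e-switch (FDB) flow into esw_attr:
 * drop, header rewrite (pedit/csum), redirect/mirror to reps on the same
 * e-switch, tunnel encap/decap and VLAN push/pop are handled here; any
 * unsupported action or combination makes the flow non-offloadable.
 */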
2505 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
2506 struct mlx5e_tc_flow_parse_attr *parse_attr,
2507 struct mlx5e_tc_flow *flow)
2509 struct mlx5_esw_flow_attr *attr = flow->esw_attr;
2510 struct mlx5e_rep_priv *rpriv = priv->ppriv;
2511 struct ip_tunnel_info *info = NULL;
2512 const struct tc_action *a;
2517 if (!tcf_exts_has_actions(exts))
2520 attr->in_rep = rpriv->rep;
2521 attr->in_mdev = priv->mdev;
2523 tcf_exts_to_list(exts, &actions);
2524 list_for_each_entry(a, &actions, list) {
2525 if (is_tcf_gact_shot(a)) {
2526 action |= MLX5_FLOW_CONTEXT_ACTION_DROP |
2527 MLX5_FLOW_CONTEXT_ACTION_COUNT;
2531 if (is_tcf_pedit(a)) {
2534 err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
2539 action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
2540 attr->mirror_count = attr->out_count;
2544 if (is_tcf_csum(a)) {
2545 if (csum_offload_supported(priv, action,
2546 tcf_csum_update_flags(a)))
2552 if (is_tcf_mirred_egress_redirect(a) || is_tcf_mirred_egress_mirror(a)) {
2553 struct mlx5e_priv *out_priv;
2554 struct net_device *out_dev;
2556 out_dev = tcf_mirred_dev(a);
2558 if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
2559 pr_err("can't support more than %d output ports, can't offload forwarding\n",
2564 if (switchdev_port_same_parent_id(priv->netdev,
2566 is_merged_eswitch_dev(priv, out_dev)) {
2567 action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2568 MLX5_FLOW_CONTEXT_ACTION_COUNT;
2569 out_priv = netdev_priv(out_dev);
2570 rpriv = out_priv->ppriv;
2571 attr->out_rep[attr->out_count] = rpriv->rep;
2572 attr->out_mdev[attr->out_count++] = out_priv->mdev;
2574 parse_attr->mirred_ifindex = out_dev->ifindex;
2575 parse_attr->tun_info = *info;
2576 attr->parse_attr = parse_attr;
2577 action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
2578 MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
2579 MLX5_FLOW_CONTEXT_ACTION_COUNT;
2580 /* attr->out_rep is resolved when we handle encap */
2582 pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
2583 priv->netdev->name, out_dev->name);
2589 if (is_tcf_tunnel_set(a)) {
2590 info = tcf_tunnel_info(a);
2595 attr->mirror_count = attr->out_count;
2599 if (is_tcf_vlan(a)) {
2600 if (tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
2601 action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_POP;
2602 } else if (tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
2603 action |= MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH;
2604 attr->vlan_vid = tcf_vlan_push_vid(a);
2605 if (mlx5_eswitch_vlan_actions_supported(priv->mdev)) {
2606 attr->vlan_prio = tcf_vlan_push_prio(a);
2607 attr->vlan_proto = tcf_vlan_push_proto(a);
2608 if (!attr->vlan_proto)
2609 attr->vlan_proto = htons(ETH_P_8021Q);
2610 } else if (tcf_vlan_push_proto(a) != htons(ETH_P_8021Q) ||
2611 tcf_vlan_push_prio(a)) {
2614 } else { /* action is TCA_VLAN_ACT_MODIFY */
2617 attr->mirror_count = attr->out_count;
2621 if (is_tcf_tunnel_release(a)) {
2622 action |= MLX5_FLOW_CONTEXT_ACTION_DECAP;
2629 attr->action = action;
2630 if (!actions_match_supported(priv, exts, parse_attr, flow))
2633 if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
2634 netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
2641 static void get_flags(int flags, u8 *flow_flags)
2643 u8 __flow_flags = 0;
2645 if (flags & MLX5E_TC_INGRESS)
2646 __flow_flags |= MLX5E_TC_FLOW_INGRESS;
2647 if (flags & MLX5E_TC_EGRESS)
2648 __flow_flags |= MLX5E_TC_FLOW_EGRESS;
2650 *flow_flags = __flow_flags;
2653 static const struct rhashtable_params tc_ht_params = {
2654 .head_offset = offsetof(struct mlx5e_tc_flow, node),
2655 .key_offset = offsetof(struct mlx5e_tc_flow, cookie),
2656 .key_len = sizeof(((struct mlx5e_tc_flow *)0)->cookie),
2657 .automatic_shrinking = true,
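/* In switchdev mode offloaded flows are stored in the uplink
 * representor's hash table, otherwise in the per-netdev NIC table.
 */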
2660 static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
2662 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2663 struct mlx5e_rep_priv *uplink_rpriv;
2665 if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) {
2666 uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
2667 return &uplink_rpriv->tc_ht;
2669 return &priv->fs.tc.ht;
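/* Entry point for adding a flower rule: duplicate cookies are ignored,
 * a flow of the right kind (NIC or e-switch) is allocated, the match and
 * actions are parsed, the rule is installed in HW and the flow is
 * inserted into the hash table keyed by the TC cookie.
 */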
2672 int mlx5e_configure_flower(struct mlx5e_priv *priv,
2673 struct tc_cls_flower_offload *f, int flags)
2675 struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
2676 struct mlx5e_tc_flow_parse_attr *parse_attr;
2677 struct rhashtable *tc_ht = get_tc_ht(priv);
2678 struct mlx5e_tc_flow *flow;
2679 int attr_size, err = 0;
2682 get_flags(flags, &flow_flags);
2684 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
2686 netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
2690 if (esw && esw->mode == SRIOV_OFFLOADS) {
2691 flow_flags |= MLX5E_TC_FLOW_ESWITCH;
2692 attr_size = sizeof(struct mlx5_esw_flow_attr);
2694 flow_flags |= MLX5E_TC_FLOW_NIC;
2695 attr_size = sizeof(struct mlx5_nic_flow_attr);
2698 flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
2699 parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
2700 if (!parse_attr || !flow) {
2705 flow->cookie = f->cookie;
2706 flow->flags = flow_flags;
2709 err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
2713 if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
2714 err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
2717 flow->rule = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
2719 err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
2722 flow->rule = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
2725 if (IS_ERR(flow->rule)) {
2726 err = PTR_ERR(flow->rule);
2732 flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
2734 if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
2735 !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
2738 err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
2740 mlx5e_tc_del_flow(priv, flow);
2752 #define DIRECTION_MASK (MLX5E_TC_INGRESS | MLX5E_TC_EGRESS)
2753 #define FLOW_DIRECTION_MASK (MLX5E_TC_FLOW_INGRESS | MLX5E_TC_FLOW_EGRESS)
2755 static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags)
2757 if ((flow->flags & FLOW_DIRECTION_MASK) == (flags & DIRECTION_MASK))
2763 int mlx5e_delete_flower(struct mlx5e_priv *priv,
2764 struct tc_cls_flower_offload *f, int flags)
2766 struct rhashtable *tc_ht = get_tc_ht(priv);
2767 struct mlx5e_tc_flow *flow;
2769 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
2770 if (!flow || !same_flow_direction(flow, flags))
2773 rhashtable_remove_fast(tc_ht, &flow->node, tc_ht_params);
2775 mlx5e_tc_del_flow(priv, flow);
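/* Report HW stats of an offloaded flow back to TC from the cached flow
 * counter (bytes, packets, last use).
 */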
2782 int mlx5e_stats_flower(struct mlx5e_priv *priv,
2783 struct tc_cls_flower_offload *f, int flags)
2785 struct rhashtable *tc_ht = get_tc_ht(priv);
2786 struct mlx5e_tc_flow *flow;
2787 struct mlx5_fc *counter;
2792 flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
2793 if (!flow || !same_flow_direction(flow, flags))
2796 if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
2799 counter = mlx5_flow_rule_counter(flow->rule);
2803 mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
2805 tcf_exts_stats_update(f->exts, bytes, packets, lastuse);
2810 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
2812 struct mlx5e_tc_table *tc = &priv->fs.tc;
2814 hash_init(tc->mod_hdr_tbl);
2815 hash_init(tc->hairpin_tbl);
2817 return rhashtable_init(&tc->ht, &tc_ht_params);
2820 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
2822 struct mlx5e_tc_flow *flow = ptr;
2823 struct mlx5e_priv *priv = flow->priv;
2825 mlx5e_tc_del_flow(priv, flow);
2829 void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
2831 struct mlx5e_tc_table *tc = &priv->fs.tc;
2833 rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);
2835 if (!IS_ERR_OR_NULL(tc->t)) {
2836 mlx5_destroy_flow_table(tc->t);
2841 int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
2843 return rhashtable_init(tc_ht, &tc_ht_params);
2846 void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
2848 rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL);