net/mlx5e: For TC offloads, always add new flow instead of appending the actions
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_tc.c
index 85796727093eec1ddfc3873cc345f2b8d861ea01..8b25850cbf6aad959b281b01a671977513ca9a96 100644 (file)
@@ -61,6 +61,7 @@ struct mlx5_nic_flow_attr {
        u32 hairpin_tirn;
        u8 match_level;
        struct mlx5_flow_table  *hairpin_ft;
+       struct mlx5_fc          *counter;
 };
 
 #define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
@@ -100,11 +101,6 @@ struct mlx5e_tc_flow_parse_attr {
        int mirred_ifindex;
 };
 
-enum {
-       MLX5_HEADER_TYPE_VXLAN = 0x0,
-       MLX5_HEADER_TYPE_NVGRE = 0x1,
-};
-
 #define MLX5E_TC_TABLE_NUM_GROUPS 4
 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
 
@@ -532,7 +528,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
 #define UNKNOWN_MATCH_PRIO 8
 
 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
-                                 struct mlx5_flow_spec *spec, u8 *match_prio)
+                                 struct mlx5_flow_spec *spec, u8 *match_prio,
+                                 struct netlink_ext_ack *extack)
 {
        void *headers_c, *headers_v;
        u8 prio_val, prio_mask = 0;
@@ -540,8 +537,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
-               netdev_warn(priv->netdev,
-                           "only PCP trust state supported for hairpin\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "only PCP trust state supported for hairpin");
                return -EOPNOTSUPP;
        }
 #endif
@@ -557,8 +554,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
        if (!vlan_present || !prio_mask) {
                prio_val = UNKNOWN_MATCH_PRIO;
        } else if (prio_mask != 0x7) {
-               netdev_warn(priv->netdev,
-                           "masked priority match not supported for hairpin\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "masked priority match not supported for hairpin");
                return -EOPNOTSUPP;
        }
 
@@ -568,7 +565,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow,
-                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
+                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
+                                 struct netlink_ext_ack *extack)
 {
        int peer_ifindex = parse_attr->mirred_ifindex;
        struct mlx5_hairpin_params params;
@@ -583,12 +581,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
        if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
-               netdev_warn(priv->netdev, "hairpin is not supported\n");
+               NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
                return -EOPNOTSUPP;
        }
 
        peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
-       err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+       err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
+                                    extack);
        if (err)
                return err;
        hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
@@ -674,29 +673,28 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
        }
 }
 
-static struct mlx5_flow_handle *
+static int
 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
-                     struct mlx5e_tc_flow *flow)
+                     struct mlx5e_tc_flow *flow,
+                     struct netlink_ext_ack *extack)
 {
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {
                .action = attr->action,
-               .has_flow_tag = true,
                .flow_tag = attr->flow_tag,
-               .encap_id = 0,
+               .reformat_id = 0,
+               .flags    = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
        };
        struct mlx5_fc *counter = NULL;
-       struct mlx5_flow_handle *rule;
        bool table_created = false;
        int err, dest_ix = 0;
 
        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
-               err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+               err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
                if (err) {
-                       rule = ERR_PTR(err);
                        goto err_add_hairpin_flow;
                }
                if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
@@ -716,22 +714,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter)) {
-                       rule = ERR_CAST(counter);
+                       err = PTR_ERR(counter);
                        goto err_fc_create;
                }
                dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-               dest[dest_ix].counter = counter;
+               dest[dest_ix].counter_id = mlx5_fc_id(counter);
                dest_ix++;
+               attr->counter = counter;
        }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                flow_act.modify_id = attr->mod_hdr_id;
                kfree(parse_attr->mod_hdr_actions);
-               if (err) {
-                       rule = ERR_PTR(err);
+               if (err)
                        goto err_create_mod_hdr_id;
-               }
        }
 
        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
@@ -753,9 +750,11 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            MLX5E_TC_FT_LEVEL, 0);
                if (IS_ERR(priv->fs.tc.t)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Failed to create tc offload table");
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
-                       rule = ERR_CAST(priv->fs.tc.t);
+                       err = PTR_ERR(priv->fs.tc.t);
                        goto err_create_ft;
                }
 
@@ -765,13 +764,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
        if (attr->match_level != MLX5_MATCH_NONE)
                parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
-       rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
-                                  &flow_act, dest, dest_ix);
+       flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+                                           &flow_act, dest, dest_ix);
 
-       if (IS_ERR(rule))
+       if (IS_ERR(flow->rule[0])) {
+               err = PTR_ERR(flow->rule[0]);
                goto err_add_rule;
+       }
 
-       return rule;
+       return 0;
 
 err_add_rule:
        if (table_created) {
@@ -787,7 +788,7 @@ err_fc_create:
        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
                mlx5e_hairpin_flow_del(priv, flow);
 err_add_hairpin_flow:
-       return rule;
+       return err;
 }
 
 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -796,7 +797,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_fc *counter = NULL;
 
-       counter = mlx5_flow_rule_counter(flow->rule[0]);
+       counter = attr->counter;
        mlx5_del_flow_rules(flow->rule[0]);
        mlx5_fc_destroy(priv->mdev, counter);
 
@@ -819,30 +820,35 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
-                             struct mlx5e_tc_flow *flow);
+                             struct mlx5e_tc_flow *flow,
+                             struct netlink_ext_ack *extack);
 
-static struct mlx5_flow_handle *
+static int
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
-                     struct mlx5e_tc_flow *flow)
+                     struct mlx5e_tc_flow *flow,
+                     struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct net_device *out_dev, *encap_dev = NULL;
-       struct mlx5_flow_handle *rule = NULL;
+       struct mlx5_fc *counter = NULL;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *out_priv;
-       int err;
+       int err = 0, encap_err = 0;
+
+       /* keep the old behaviour, use same prio for all offloaded rules */
+       attr->prio = 1;
 
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
                out_dev = __dev_get_by_index(dev_net(priv->netdev),
                                             attr->parse_attr->mirred_ifindex);
-               err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
-                                        out_dev, &encap_dev, flow);
-               if (err) {
-                       rule = ERR_PTR(err);
-                       if (err != -EAGAIN)
-                               goto err_attach_encap;
+               encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+                                              out_dev, &encap_dev, flow,
+                                              extack);
+               if (encap_err && encap_err != -EAGAIN) {
+                       err = encap_err;
+                       goto err_attach_encap;
                }
                out_priv = netdev_priv(encap_dev);
                rpriv = out_priv->ppriv;
@@ -851,49 +857,61 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
        }
 
        err = mlx5_eswitch_add_vlan_action(esw, attr);
-       if (err) {
-               rule = ERR_PTR(err);
+       if (err)
                goto err_add_vlan;
-       }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                kfree(parse_attr->mod_hdr_actions);
-               if (err) {
-                       rule = ERR_PTR(err);
+               if (err)
                        goto err_mod_hdr;
+       }
+
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               counter = mlx5_fc_create(esw->dev, true);
+               if (IS_ERR(counter)) {
+                       err = PTR_ERR(counter);
+                       goto err_create_counter;
                }
+
+               attr->counter = counter;
        }
 
-       /* we get here if (1) there's no error (rule being null) or when
+       /* we get here if (1) there's no error or when
         * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
         */
-       if (rule != ERR_PTR(-EAGAIN)) {
-               rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
-               if (IS_ERR(rule))
+       if (encap_err != -EAGAIN) {
+               flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
+               if (IS_ERR(flow->rule[0])) {
+                       err = PTR_ERR(flow->rule[0]);
                        goto err_add_rule;
+               }
 
                if (attr->mirror_count) {
                        flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
-                       if (IS_ERR(flow->rule[1]))
+                       if (IS_ERR(flow->rule[1])) {
+                               err = PTR_ERR(flow->rule[1]);
                                goto err_fwd_rule;
+                       }
                }
        }
-       return rule;
+
+       return encap_err;
 
 err_fwd_rule:
-       mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
-       rule = flow->rule[1];
+       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
 err_add_rule:
+       mlx5_fc_destroy(esw->dev, counter);
+err_create_counter:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
 err_mod_hdr:
        mlx5_eswitch_del_vlan_action(esw, attr);
 err_add_vlan:
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
                mlx5e_detach_encap(priv, flow);
 err_attach_encap:
-       return rule;
+       return err;
 }
 
 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
@@ -905,19 +923,22 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
                flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
                if (attr->mirror_count)
-                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+                       mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
                mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
        }
 
        mlx5_eswitch_del_vlan_action(esw, attr);
 
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
                mlx5e_detach_encap(priv, flow);
                kvfree(attr->parse_attr);
        }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
+
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+               mlx5_fc_destroy(esw->dev, attr->counter);
 }
 
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
@@ -928,9 +949,10 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
        struct mlx5e_tc_flow *flow;
        int err;
 
-       err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              e->encap_size, e->encap_header,
-                              &e->encap_id);
+       err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+                                        e->encap_size, e->encap_header,
+                                        MLX5_FLOW_NAMESPACE_FDB,
+                                        &e->encap_id);
        if (err) {
                mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
                               err);
@@ -977,17 +999,25 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
 
                        flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
                        if (attr->mirror_count)
-                               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
+                               mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
                        mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
                }
        }
 
        if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
                e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
-               mlx5_encap_dealloc(priv->mdev, e->encap_id);
+               mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
        }
 }
 
+static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
+{
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+               return flow->esw_attr->counter;
+       else
+               return flow->nic_attr->counter;
+}
+
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 {
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
@@ -1013,7 +1043,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
                        continue;
                list_for_each_entry(flow, &e->flows, encap) {
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
-                               counter = mlx5_flow_rule_counter(flow->rule[0]);
+                               counter = mlx5e_tc_get_counter(flow);
                                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
@@ -1053,7 +1083,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
 
                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
-                       mlx5_encap_dealloc(priv->mdev, e->encap_id);
+                       mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
 
                hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
@@ -1105,6 +1135,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1133,6 +1164,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "port isn't an offloaded vxlan udp dport");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
@@ -1149,6 +1182,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
 vxlan_match_offload_err:
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "IP tunnel decap offload supported only for vxlan, must set UDP dport");
                netdev_warn(priv->netdev,
                            "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
                return -EOPNOTSUPP;
@@ -1225,6 +1260,16 @@ vxlan_match_offload_err:
 
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+               if (mask->ttl &&
+                   !MLX5_CAP_ESW_FLOWTABLE_FDB
+                       (priv->mdev,
+                        ft_field_support.outer_ipv4_ttl)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on TTL is not supported");
+                       return -EOPNOTSUPP;
+               }
+
        }
 
        /* Enforce DMAC when offloading incoming tunneled flows.
@@ -1247,6 +1292,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *f,
                              u8 *match_level)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1277,6 +1323,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
              BIT(FLOW_DISSECTOR_KEY_TCP) |
              BIT(FLOW_DISSECTOR_KEY_IP)  |
              BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+               NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
@@ -1553,8 +1600,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                if (mask->ttl &&
                    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
-                                               ft_field_support.outer_ipv4_ttl))
+                                               ft_field_support.outer_ipv4_ttl)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on TTL is not supported");
                        return -EOPNOTSUPP;
+               }
 
                if (mask->tos || mask->ttl)
                        *match_level = MLX5_MATCH_L3;
@@ -1596,6 +1646,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Only UDP and TCP transports are supported for L4 matching");
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
@@ -1632,6 +1684,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1646,6 +1699,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                if (rep->vport != FDB_UPLINK_VPORT &&
                    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
                    esw->offloads.inline_mode < match_level)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Flow is not offloaded due to min inline setting");
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    match_level, esw->offloads.inline_mode);
@@ -1747,7 +1802,8 @@ static struct mlx5_fields fields[] = {
  */
 static int offload_pedit_fields(struct pedit_headers *masks,
                                struct pedit_headers *vals,
-                               struct mlx5e_tc_flow_parse_attr *parse_attr)
+                               struct mlx5e_tc_flow_parse_attr *parse_attr,
+                               struct netlink_ext_ack *extack)
 {
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
        int i, action_size, nactions, max_actions, first, last, next_z;
@@ -1786,11 +1842,15 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                        continue;
 
                if (s_mask && a_mask) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "can't set and add to the same HW field");
                        printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
                        return -EOPNOTSUPP;
                }
 
                if (nactions == max_actions) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "too many pedit actions, can't offload");
                        printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
                        return -EOPNOTSUPP;
                }
@@ -1823,6 +1883,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                next_z = find_next_zero_bit(&mask, field_bsize, first);
                last  = find_last_bit(&mask, field_bsize);
                if (first < next_z && next_z < last) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "rewrite of few sub-fields isn't supported");
                        printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
                               mask);
                        return -EOPNOTSUPP;
@@ -1881,7 +1943,8 @@ static const struct pedit_headers zero_masks = {};
 
 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                                 const struct tc_action *a, int namespace,
-                                struct mlx5e_tc_flow_parse_attr *parse_attr)
+                                struct mlx5e_tc_flow_parse_attr *parse_attr,
+                                struct netlink_ext_ack *extack)
 {
        struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
        int nkeys, i, err = -EOPNOTSUPP;
@@ -1899,12 +1962,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                err = -EOPNOTSUPP; /* can't be all optimistic */
 
                if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
-                       netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "legacy pedit isn't offloaded");
                        goto out_err;
                }
 
                if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
-                       netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
+                       NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
                        goto out_err;
                }
 
@@ -1921,13 +1985,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
        if (err)
                goto out_err;
 
-       err = offload_pedit_fields(masks, vals, parse_attr);
+       err = offload_pedit_fields(masks, vals, parse_attr, extack);
        if (err < 0)
                goto out_dealloc_parsed_actions;
 
        for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
                cmd_masks = &masks[cmd];
                if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "attempt to offload an unsupported field");
                        netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
                        print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
                                       16, 1, cmd_masks, sizeof(zero_masks), true);
@@ -1944,19 +2010,26 @@ out_err:
        return err;
 }
 
-static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+static bool csum_offload_supported(struct mlx5e_priv *priv,
+                                  u32 action,
+                                  u32 update_flags,
+                                  struct netlink_ext_ack *extack)
 {
        u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
                         TCA_CSUM_UPDATE_FLAG_UDP;
 
        /*  The HW recalcs checksums only if re-writing headers */
        if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "TC csum action is only offloaded with pedit");
                netdev_warn(priv->netdev,
                            "TC csum action is only offloaded with pedit\n");
                return false;
        }
 
        if (update_flags & ~prot_flags) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload TC csum action for some header/s");
                netdev_warn(priv->netdev,
                            "can't offload TC csum action for some header/s - flags %#x\n",
                            update_flags);
@@ -1967,7 +2040,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
 }
 
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
-                                         struct tcf_exts *exts)
+                                         struct tcf_exts *exts,
+                                         struct netlink_ext_ack *extack)
 {
        const struct tc_action *a;
        bool modify_ip_header;
@@ -2005,6 +2079,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
        if (modify_ip_header && ip_proto != IPPROTO_TCP &&
            ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload re-write of non TCP/UDP");
                pr_info("can't offload re-write of ip proto %d\n", ip_proto);
                return false;
        }
@@ -2016,7 +2092,8 @@ out_ok:
 static bool actions_match_supported(struct mlx5e_priv *priv,
                                    struct tcf_exts *exts,
                                    struct mlx5e_tc_flow_parse_attr *parse_attr,
-                                   struct mlx5e_tc_flow *flow)
+                                   struct mlx5e_tc_flow *flow,
+                                   struct netlink_ext_ack *extack)
 {
        u32 actions;
 
@@ -2030,7 +2107,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
                return false;
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
-               return modify_header_match_supported(&parse_attr->spec, exts);
+               return modify_header_match_supported(&parse_attr->spec, exts,
+                                                    extack);
 
        return true;
 }
@@ -2043,15 +2121,16 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
        fmdev = priv->mdev;
        pmdev = peer_priv->mdev;
 
-       mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
-       mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
+       fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
+       psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
 
        return (fsystem_guid == psystem_guid);
 }
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow)
+                               struct mlx5e_tc_flow *flow,
+                               struct netlink_ext_ack *extack)
 {
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        const struct tc_action *a;
@@ -2075,7 +2154,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_pedit(a)) {
                        err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
-                                                   parse_attr);
+                                                   parse_attr, extack);
                        if (err)
                                return err;
 
@@ -2086,7 +2165,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_csum(a)) {
                        if (csum_offload_supported(priv, action,
-                                                  tcf_csum_update_flags(a)))
+                                                  tcf_csum_update_flags(a),
+                                                  extack))
                                continue;
 
                        return -EOPNOTSUPP;
@@ -2102,6 +2182,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        } else {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "device is not on same HW, can't offload");
                                netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
                                            peer_dev->name);
                                return -EINVAL;
@@ -2113,8 +2195,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        u32 mark = tcf_skbedit_mark(a);
 
                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
-                               netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
-                                           mark);
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Bad flow mark - only 16 bit is supported");
                                return -EINVAL;
                        }
 
@@ -2127,7 +2209,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        }
 
        attr->action = action;
-       if (!actions_match_supported(priv, exts, parse_attr, flow))
+       if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
        return 0;
@@ -2331,7 +2413,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                return -ENOMEM;
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.fl4_dport = tun_key->tp_dst;
                break;
@@ -2375,7 +2457,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        read_unlock_bh(&n->lock);
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                gen_vxlan_header_ipv4(out_dev, encap_header,
                                      ipv4_encap_size, e->h_dest, tos, ttl,
                                      fl4.daddr,
@@ -2395,8 +2477,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                goto out;
        }
 
-       err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              ipv4_encap_size, encap_header, &e->encap_id);
+       err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+                                        ipv4_encap_size, encap_header,
+                                        MLX5_FLOW_NAMESPACE_FDB,
+                                        &e->encap_id);
        if (err)
                goto destroy_neigh_entry;
 
@@ -2440,7 +2524,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
                return -ENOMEM;
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                fl6.flowi6_proto = IPPROTO_UDP;
                fl6.fl6_dport = tun_key->tp_dst;
                break;
@@ -2484,7 +2568,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
        read_unlock_bh(&n->lock);
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                gen_vxlan_header_ipv6(out_dev, encap_header,
                                      ipv6_encap_size, e->h_dest, tos, ttl,
                                      &fl6.daddr,
@@ -2505,8 +2589,10 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
                goto out;
        }
 
-       err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              ipv6_encap_size, encap_header, &e->encap_id);
+       err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+                                        ipv6_encap_size, encap_header,
+                                        MLX5_FLOW_NAMESPACE_FDB,
+                                        &e->encap_id);
        if (err)
                goto destroy_neigh_entry;
 
@@ -2529,7 +2615,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
-                             struct mlx5e_tc_flow *flow)
+                             struct mlx5e_tc_flow *flow,
+                             struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2547,6 +2634,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        /* setting udp src port isn't supported */
        if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
 vxlan_encap_offload_err:
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "must set udp dst port and not set udp src port");
                netdev_warn(priv->netdev,
                            "must set udp dst port and not set udp src port\n");
                return -EOPNOTSUPP;
@@ -2554,8 +2643,10 @@ vxlan_encap_offload_err:
 
        if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
-               tunnel_type = MLX5_HEADER_TYPE_VXLAN;
+               tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
        } else {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "port isn't an offloaded vxlan udp dport");
                netdev_warn(priv->netdev,
                            "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
                return -EOPNOTSUPP;
@@ -2660,7 +2751,8 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
 
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow)
+                               struct mlx5e_tc_flow *flow,
+                               struct netlink_ext_ack *extack)
 {
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -2686,7 +2778,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_pedit(a)) {
                        err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
-                                                   parse_attr);
+                                                   parse_attr, extack);
                        if (err)
                                return err;
 
@@ -2697,7 +2789,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_csum(a)) {
                        if (csum_offload_supported(priv, action,
-                                                  tcf_csum_update_flags(a)))
+                                                  tcf_csum_update_flags(a),
+                                                  extack))
                                continue;
 
                        return -EOPNOTSUPP;
@@ -2710,6 +2803,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        out_dev = tcf_mirred_dev(a);
 
                        if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "can't support more output ports, can't offload forwarding");
                                pr_err("can't support more than %d output ports, can't offload forwarding\n",
                                       attr->out_count);
                                return -EOPNOTSUPP;
@@ -2728,11 +2823,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                parse_attr->mirred_ifindex = out_dev->ifindex;
                                parse_attr->tun_info = *info;
                                attr->parse_attr = parse_attr;
-                               action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+                               action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
                                          MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                /* attr->out_rep is resolved when we handle encap */
                        } else {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "devices are not on same switch HW, can't offload forwarding");
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
@@ -2769,10 +2866,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        }
 
        attr->action = action;
-       if (!actions_match_supported(priv, exts, parse_attr, flow))
+       if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
        if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "current firmware doesn't support split rule for port mirroring");
                netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
                return -EOPNOTSUPP;
        }
@@ -2811,31 +2910,15 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
                return &priv->fs.tc.ht;
 }
 
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
-                          struct tc_cls_flower_offload *f, int flags)
+static int
+mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
+                struct tc_cls_flower_offload *f, u8 flow_flags,
+                struct mlx5e_tc_flow_parse_attr **__parse_attr,
+                struct mlx5e_tc_flow **__flow)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
-       struct rhashtable *tc_ht = get_tc_ht(priv);
        struct mlx5e_tc_flow *flow;
-       int attr_size, err = 0;
-       u8 flow_flags = 0;
-
-       get_flags(flags, &flow_flags);
-
-       flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
-       if (flow) {
-               netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
-               return 0;
-       }
-
-       if (esw && esw->mode == SRIOV_OFFLOADS) {
-               flow_flags |= MLX5E_TC_FLOW_ESWITCH;
-               attr_size  = sizeof(struct mlx5_esw_flow_attr);
-       } else {
-               flow_flags |= MLX5E_TC_FLOW_NIC;
-               attr_size  = sizeof(struct mlx5_nic_flow_attr);
-       }
+       int err;
 
        flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
        parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
@@ -2849,45 +2932,155 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
        flow->priv = priv;
 
        err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
-       if (err < 0)
+       if (err)
                goto err_free;
 
-       if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-               err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
-               if (err < 0)
-                       goto err_free;
-               flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
-       } else {
-               err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
-               if (err < 0)
-                       goto err_free;
-               flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
-       }
+       *__flow = flow;
+       *__parse_attr = parse_attr;
 
-       if (IS_ERR(flow->rule[0])) {
-               err = PTR_ERR(flow->rule[0]);
-               if (err != -EAGAIN)
-                       goto err_free;
-       }
+       return 0;
 
-       if (err != -EAGAIN)
+err_free:
+       kfree(flow);
+       kvfree(parse_attr);
+       return err;
+}
+
+static int
+mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+                  struct tc_cls_flower_offload *f,
+                  u8 flow_flags,
+                  struct mlx5e_tc_flow **__flow)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct mlx5e_tc_flow_parse_attr *parse_attr;
+       struct mlx5e_tc_flow *flow;
+       int attr_size, err;
+
+       flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+       attr_size  = sizeof(struct mlx5_esw_flow_attr);
+       err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+                              &parse_attr, &flow);
+       if (err)
+               goto out;
+
+       err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
+       if (err)
+               goto err_free;
+
+       err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
+       if (err && err != -EAGAIN)
+               goto err_free;
+
+       if (!err)
                flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
 
-       if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
-           !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+       if (!(flow->esw_attr->action &
+             MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
                kvfree(parse_attr);
 
-       err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
-       if (err) {
-               mlx5e_tc_del_flow(priv, flow);
-               kfree(flow);
-       }
+       *__flow = flow;
 
+       return 0;
+
+err_free:
+       kfree(flow);
+       kvfree(parse_attr);
+out:
        return err;
+}
+
+static int
+mlx5e_add_nic_flow(struct mlx5e_priv *priv,
+                  struct tc_cls_flower_offload *f,
+                  u8 flow_flags,
+                  struct mlx5e_tc_flow **__flow)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct mlx5e_tc_flow_parse_attr *parse_attr;
+       struct mlx5e_tc_flow *flow;
+       int attr_size, err;
+
+       flow_flags |= MLX5E_TC_FLOW_NIC;
+       attr_size  = sizeof(struct mlx5_nic_flow_attr);
+       err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+                              &parse_attr, &flow);
+       if (err)
+               goto out;
+
+       err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
+       if (err)
+               goto err_free;
+
+       err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
+       if (err)
+               goto err_free;
+
+       flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+       kvfree(parse_attr);
+       *__flow = flow;
+
+       return 0;
 
 err_free:
+       kfree(flow);
        kvfree(parse_attr);
+out:
+       return err;
+}
+
+static int
+mlx5e_tc_add_flow(struct mlx5e_priv *priv,
+                 struct tc_cls_flower_offload *f,
+                 int flags,
+                 struct mlx5e_tc_flow **flow)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       u8 flow_flags;
+       int err;
+
+       get_flags(flags, &flow_flags);
+
+       if (esw && esw->mode == SRIOV_OFFLOADS)
+               err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow);
+       else
+               err = mlx5e_add_nic_flow(priv, f, flow_flags, flow);
+
+       return err;
+}
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv,
+                          struct tc_cls_flower_offload *f, int flags)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct rhashtable *tc_ht = get_tc_ht(priv);
+       struct mlx5e_tc_flow *flow;
+       int err = 0;
+
+       flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+       if (flow) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "flow cookie already exists, ignoring");
+               netdev_warn_once(priv->netdev,
+                                "flow cookie %lx already exists, ignoring\n",
+                                f->cookie);
+               goto out;
+       }
+
+       err = mlx5e_tc_add_flow(priv, f, flags, &flow);
+       if (err)
+               goto out;
+
+       err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
+       if (err)
+               goto err_free;
+
+       return 0;
+
+err_free:
+       mlx5e_tc_del_flow(priv, flow);
        kfree(flow);
+out:
        return err;
 }
 
@@ -2938,7 +3131,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
        if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
                return 0;
 
-       counter = mlx5_flow_rule_counter(flow->rule[0]);
+       counter = mlx5e_tc_get_counter(flow);
        if (!counter)
                return 0;