net/mlx5e: Don't match on vlan non-existence if ethertype is wildcarded
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 85796727093eec1ddfc3873cc345f2b8d861ea01..84eb6939e69a96dcd6a33ead451113d7cf4044f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -61,6 +61,7 @@ struct mlx5_nic_flow_attr {
        u32 hairpin_tirn;
        u8 match_level;
        struct mlx5_flow_table  *hairpin_ft;
+       struct mlx5_fc          *counter;
 };
 
 #define MLX5E_TC_FLOW_BASE (MLX5E_TC_LAST_EXPORTED_BIT + 1)
@@ -73,6 +74,7 @@ enum {
        MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2),
        MLX5E_TC_FLOW_HAIRPIN   = BIT(MLX5E_TC_FLOW_BASE + 3),
        MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4),
+       MLX5E_TC_FLOW_SLOW        = BIT(MLX5E_TC_FLOW_BASE + 5),
 };
 
 #define MLX5E_TC_MAX_SPLITS 1
@@ -81,7 +83,7 @@ struct mlx5e_tc_flow {
        struct rhash_head       node;
        struct mlx5e_priv       *priv;
        u64                     cookie;
-       u8                      flags;
+       u16                     flags;
        struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1];
        struct list_head        encap;   /* flows sharing the same encap ID */
        struct list_head        mod_hdr; /* flows sharing the same mod hdr ID */
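
The flags field above grows from u8 to u16 because the private flow flags now
reach BIT(MLX5E_TC_FLOW_BASE + 5) (the new SLOW bit), which can land past bit 7
depending on where the exported bits end. A hypothetical compile-time guard,
not part of this patch, that would catch the next overflow (BUILD_BUG_ON needs
function scope, e.g. somewhere in the tc init path):

	/* hypothetical: flag bits must fit the u16 flags field */
	BUILD_BUG_ON(MLX5E_TC_FLOW_BASE + 5 >= sizeof(u16) * BITS_PER_BYTE);
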
@@ -100,11 +102,6 @@ struct mlx5e_tc_flow_parse_attr {
        int mirred_ifindex;
 };
 
-enum {
-       MLX5_HEADER_TYPE_VXLAN = 0x0,
-       MLX5_HEADER_TYPE_NVGRE = 0x1,
-};
-
 #define MLX5E_TC_TABLE_NUM_GROUPS 4
 #define MLX5E_TC_TABLE_MAX_GROUP_SIZE BIT(16)
 
@@ -532,7 +529,8 @@ static struct mlx5e_hairpin_entry *mlx5e_hairpin_get(struct mlx5e_priv *priv,
 #define UNKNOWN_MATCH_PRIO 8
 
 static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
-                                 struct mlx5_flow_spec *spec, u8 *match_prio)
+                                 struct mlx5_flow_spec *spec, u8 *match_prio,
+                                 struct netlink_ext_ack *extack)
 {
        void *headers_c, *headers_v;
        u8 prio_val, prio_mask = 0;
@@ -540,8 +538,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 #ifdef CONFIG_MLX5_CORE_EN_DCB
        if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_PCP) {
-               netdev_warn(priv->netdev,
-                           "only PCP trust state supported for hairpin\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "only PCP trust state supported for hairpin");
                return -EOPNOTSUPP;
        }
 #endif
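
For context: NL_SET_ERR_MSG_MOD stores a static string on the netlink extended
ack, so the failure reason now reaches userspace (tc prints it as
"Error: mlx5_core: ...") instead of only the kernel log. Its definition in
<linux/netlink.h> is just a module-name-prefixed NL_SET_ERR_MSG:

	#define NL_SET_ERR_MSG_MOD(extack, msg) \
		NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
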
@@ -557,8 +555,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
        if (!vlan_present || !prio_mask) {
                prio_val = UNKNOWN_MATCH_PRIO;
        } else if (prio_mask != 0x7) {
-               netdev_warn(priv->netdev,
-                           "masked priority match not supported for hairpin\n");
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "masked priority match not supported for hairpin");
                return -EOPNOTSUPP;
        }
 
@@ -568,7 +566,8 @@ static int mlx5e_hairpin_get_prio(struct mlx5e_priv *priv,
 
 static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
                                  struct mlx5e_tc_flow *flow,
-                                 struct mlx5e_tc_flow_parse_attr *parse_attr)
+                                 struct mlx5e_tc_flow_parse_attr *parse_attr,
+                                 struct netlink_ext_ack *extack)
 {
        int peer_ifindex = parse_attr->mirred_ifindex;
        struct mlx5_hairpin_params params;
@@ -583,12 +582,13 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv,
 
        peer_mdev = mlx5e_hairpin_get_mdev(dev_net(priv->netdev), peer_ifindex);
        if (!MLX5_CAP_GEN(priv->mdev, hairpin) || !MLX5_CAP_GEN(peer_mdev, hairpin)) {
-               netdev_warn(priv->netdev, "hairpin is not supported\n");
+               NL_SET_ERR_MSG_MOD(extack, "hairpin is not supported");
                return -EOPNOTSUPP;
        }
 
        peer_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
-       err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio);
+       err = mlx5e_hairpin_get_prio(priv, &parse_attr->spec, &match_prio,
+                                    extack);
        if (err)
                return err;
        hpe = mlx5e_hairpin_get(priv, peer_id, match_prio);
@@ -674,29 +674,28 @@ static void mlx5e_hairpin_flow_del(struct mlx5e_priv *priv,
        }
 }
 
-static struct mlx5_flow_handle *
+static int
 mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
-                     struct mlx5e_tc_flow *flow)
+                     struct mlx5e_tc_flow *flow,
+                     struct netlink_ext_ack *extack)
 {
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_flow_destination dest[2] = {};
        struct mlx5_flow_act flow_act = {
                .action = attr->action,
-               .has_flow_tag = true,
                .flow_tag = attr->flow_tag,
-               .encap_id = 0,
+               .reformat_id = 0,
+               .flags    = FLOW_ACT_HAS_TAG | FLOW_ACT_NO_APPEND,
        };
        struct mlx5_fc *counter = NULL;
-       struct mlx5_flow_handle *rule;
        bool table_created = false;
        int err, dest_ix = 0;
 
        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN) {
-               err = mlx5e_hairpin_flow_add(priv, flow, parse_attr);
+               err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack);
-               if (err) {
-                       rule = ERR_PTR(err);
+               if (err)
                        goto err_add_hairpin_flow;
-               }
                if (flow->flags & MLX5E_TC_FLOW_HAIRPIN_RSS) {
@@ -716,22 +715,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
                counter = mlx5_fc_create(dev, true);
                if (IS_ERR(counter)) {
-                       rule = ERR_CAST(counter);
+                       err = PTR_ERR(counter);
                        goto err_fc_create;
                }
                dest[dest_ix].type = MLX5_FLOW_DESTINATION_TYPE_COUNTER;
-               dest[dest_ix].counter = counter;
+               dest[dest_ix].counter_id = mlx5_fc_id(counter);
                dest_ix++;
+               attr->counter = counter;
        }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                flow_act.modify_id = attr->mod_hdr_id;
                kfree(parse_attr->mod_hdr_actions);
-               if (err) {
-                       rule = ERR_PTR(err);
+               if (err)
                        goto err_create_mod_hdr_id;
-               }
        }
 
        if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
@@ -753,9 +751,11 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
                                                            MLX5E_TC_TABLE_NUM_GROUPS,
                                                            MLX5E_TC_FT_LEVEL, 0);
                if (IS_ERR(priv->fs.tc.t)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Failed to create tc offload table");
                        netdev_err(priv->netdev,
                                   "Failed to create tc offload table\n");
-                       rule = ERR_CAST(priv->fs.tc.t);
+                       err = PTR_ERR(priv->fs.tc.t);
                        goto err_create_ft;
                }
 
@@ -765,13 +765,15 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
        if (attr->match_level != MLX5_MATCH_NONE)
                parse_attr->spec.match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
-       rule = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
-                                  &flow_act, dest, dest_ix);
+       flow->rule[0] = mlx5_add_flow_rules(priv->fs.tc.t, &parse_attr->spec,
+                                           &flow_act, dest, dest_ix);
 
-       if (IS_ERR(rule))
+       if (IS_ERR(flow->rule[0])) {
+               err = PTR_ERR(flow->rule[0]);
                goto err_add_rule;
+       }
 
-       return rule;
+       return 0;
 
 err_add_rule:
        if (table_created) {
@@ -787,7 +789,7 @@ err_fc_create:
        if (flow->flags & MLX5E_TC_FLOW_HAIRPIN)
                mlx5e_hairpin_flow_del(priv, flow);
 err_add_hairpin_flow:
-       return rule;
+       return err;
 }
 
 static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
@@ -796,7 +798,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv,
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        struct mlx5_fc *counter = NULL;
 
-       counter = mlx5_flow_rule_counter(flow->rule[0]);
+       counter = attr->counter;
        mlx5_del_flow_rules(flow->rule[0]);
        mlx5_fc_destroy(priv->mdev, counter);
 
@@ -819,30 +821,119 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
-                             struct mlx5e_tc_flow *flow);
+                             struct mlx5e_tc_flow *flow,
+                             struct netlink_ext_ack *extack);
 
 static struct mlx5_flow_handle *
+mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
+                          struct mlx5e_tc_flow *flow,
+                          struct mlx5_flow_spec *spec,
+                          struct mlx5_esw_flow_attr *attr)
+{
+       struct mlx5_flow_handle *rule;
+
+       rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
+       if (IS_ERR(rule))
+               return rule;
+
+       if (attr->mirror_count) {
+               flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
+               if (IS_ERR(flow->rule[1])) {
+                       mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+                       return flow->rule[1];
+               }
+       }
+
+       flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+       return rule;
+}
+
+static void
+mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
+                            struct mlx5e_tc_flow *flow,
+                            struct mlx5_esw_flow_attr *attr)
+{
+       flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
+
+       if (attr->mirror_count)
+               mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
+
+       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+}
+
+static struct mlx5_flow_handle *
+mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw,
+                             struct mlx5e_tc_flow *flow,
+                             struct mlx5_flow_spec *spec,
+                             struct mlx5_esw_flow_attr *slow_attr)
+{
+       struct mlx5_flow_handle *rule;
+
+       memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+       slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
+       slow_attr->mirror_count = 0;
+       slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN;
+
+       rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr);
+       if (!IS_ERR(rule))
+               flow->flags |= MLX5E_TC_FLOW_SLOW;
+
+       return rule;
+}
+
+static void
+mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw,
+                                 struct mlx5e_tc_flow *flow,
+                                 struct mlx5_esw_flow_attr *slow_attr)
+{
+       memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr));
+       mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr);
+       flow->flags &= ~MLX5E_TC_FLOW_SLOW;
+}
+
+static int
 mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
                      struct mlx5e_tc_flow_parse_attr *parse_attr,
-                     struct mlx5e_tc_flow *flow)
+                     struct mlx5e_tc_flow *flow,
+                     struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       u32 max_chain = mlx5_eswitch_get_chain_range(esw);
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+       u16 max_prio = mlx5_eswitch_get_prio_range(esw);
        struct net_device *out_dev, *encap_dev = NULL;
-       struct mlx5_flow_handle *rule = NULL;
+       struct mlx5_fc *counter = NULL;
        struct mlx5e_rep_priv *rpriv;
        struct mlx5e_priv *out_priv;
-       int err;
+       int err = 0, encap_err = 0;
+
+       /* if prios are not supported, keep the old behaviour of using the
+        * same prio for all offloaded rules.
+        */
+       if (!mlx5_eswitch_prios_supported(esw))
+               attr->prio = 1;
 
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+       if (attr->chain > max_chain) {
+               NL_SET_ERR_MSG(extack, "Requested chain is out of supported range");
+               err = -EOPNOTSUPP;
+               goto err_max_prio_chain;
+       }
+
+       if (attr->prio > max_prio) {
+               NL_SET_ERR_MSG(extack, "Requested priority is out of supported range");
+               err = -EOPNOTSUPP;
+               goto err_max_prio_chain;
+       }
+
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
                out_dev = __dev_get_by_index(dev_net(priv->netdev),
                                             attr->parse_attr->mirred_ifindex);
-               err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
-                                        out_dev, &encap_dev, flow);
-               if (err) {
-                       rule = ERR_PTR(err);
-                       if (err != -EAGAIN)
-                               goto err_attach_encap;
+               encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info,
+                                              out_dev, &encap_dev, flow,
+                                              extack);
+               if (encap_err && encap_err != -EAGAIN) {
+                       err = encap_err;
+                       goto err_attach_encap;
                }
                out_priv = netdev_priv(encap_dev);
                rpriv = out_priv->ppriv;
@@ -851,49 +942,58 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv,
        }
 
        err = mlx5_eswitch_add_vlan_action(esw, attr);
-       if (err) {
-               rule = ERR_PTR(err);
+       if (err)
                goto err_add_vlan;
-       }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) {
                err = mlx5e_attach_mod_hdr(priv, flow, parse_attr);
                kfree(parse_attr->mod_hdr_actions);
-               if (err) {
-                       rule = ERR_PTR(err);
+               if (err)
                        goto err_mod_hdr;
+       }
+
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) {
+               counter = mlx5_fc_create(esw->dev, true);
+               if (IS_ERR(counter)) {
+                       err = PTR_ERR(counter);
+                       goto err_create_counter;
                }
+
+               attr->counter = counter;
        }
 
-       /* we get here if (1) there's no error (rule being null) or when
+       /* we get here if (1) there's no error or when
         * (2) there's an encap action and we're on -EAGAIN (no valid neigh)
         */
-       if (rule != ERR_PTR(-EAGAIN)) {
-               rule = mlx5_eswitch_add_offloaded_rule(esw, &parse_attr->spec, attr);
-               if (IS_ERR(rule))
-                       goto err_add_rule;
-
-               if (attr->mirror_count) {
-                       flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &parse_attr->spec, attr);
-                       if (IS_ERR(flow->rule[1]))
-                               goto err_fwd_rule;
-               }
+       if (encap_err == -EAGAIN) {
+               /* continue with goto slow path rule instead */
+               struct mlx5_esw_flow_attr slow_attr;
+
+               flow->rule[0] = mlx5e_tc_offload_to_slow_path(esw, flow, &parse_attr->spec, &slow_attr);
+       } else {
+               flow->rule[0] = mlx5e_tc_offload_fdb_rules(esw, flow, &parse_attr->spec, attr);
        }
-       return rule;
 
-err_fwd_rule:
-       mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
-       rule = flow->rule[1];
+       if (IS_ERR(flow->rule[0])) {
+               err = PTR_ERR(flow->rule[0]);
+               goto err_add_rule;
+       }
+
+       return 0;
+
 err_add_rule:
+       mlx5_fc_destroy(esw->dev, counter);
+err_create_counter:
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
 err_mod_hdr:
        mlx5_eswitch_del_vlan_action(esw, attr);
 err_add_vlan:
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)
                mlx5e_detach_encap(priv, flow);
 err_attach_encap:
-       return rule;
+err_max_prio_chain:
+       return err;
 }
 
 static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
@@ -901,36 +1001,43 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+       struct mlx5_esw_flow_attr slow_attr;
 
        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
-               flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-               if (attr->mirror_count)
-                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
-               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+               if (flow->flags & MLX5E_TC_FLOW_SLOW)
+                       mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+               else
+                       mlx5e_tc_unoffload_fdb_rules(esw, flow, attr);
        }
 
        mlx5_eswitch_del_vlan_action(esw, attr);
 
-       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP) {
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) {
                mlx5e_detach_encap(priv, flow);
                kvfree(attr->parse_attr);
        }
 
        if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
                mlx5e_detach_mod_hdr(priv, flow);
+
+       if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT)
+               mlx5_fc_destroy(esw->dev, attr->counter);
 }
 
 void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-       struct mlx5_esw_flow_attr *esw_attr;
+       struct mlx5_esw_flow_attr slow_attr, *esw_attr;
+       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_spec *spec;
        struct mlx5e_tc_flow *flow;
        int err;
 
-       err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              e->encap_size, e->encap_header,
-                              &e->encap_id);
+       err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+                                        e->encap_size, e->encap_header,
+                                        MLX5_FLOW_NAMESPACE_FDB,
+                                        &e->encap_id);
        if (err) {
                mlx5_core_warn(priv->mdev, "Failed to offload cached encapsulation header, %d\n",
                               err);
@@ -942,26 +1049,20 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv,
        list_for_each_entry(flow, &e->flows, encap) {
                esw_attr = flow->esw_attr;
                esw_attr->encap_id = e->encap_id;
-               flow->rule[0] = mlx5_eswitch_add_offloaded_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
-               if (IS_ERR(flow->rule[0])) {
-                       err = PTR_ERR(flow->rule[0]);
+               spec = &esw_attr->parse_attr->spec;
+
+               /* update from slow path rule to encap rule */
+               rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr);
+               if (IS_ERR(rule)) {
+                       err = PTR_ERR(rule);
                        mlx5_core_warn(priv->mdev, "Failed to update cached encapsulation flow, %d\n",
                                       err);
                        continue;
                }
 
-               if (esw_attr->mirror_count) {
-                       flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, &esw_attr->parse_attr->spec, esw_attr);
-                       if (IS_ERR(flow->rule[1])) {
-                               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], esw_attr);
-                               err = PTR_ERR(flow->rule[1]);
-                               mlx5_core_warn(priv->mdev, "Failed to update cached mirror flow, %d\n",
-                                              err);
-                               continue;
-                       }
-               }
-
-               flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+               mlx5e_tc_unoffload_from_slow_path(esw, flow, &slow_attr);
+               flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when slow path rule removed */
+               flow->rule[0] = rule;
        }
 }
 
@@ -969,25 +1070,44 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv,
                              struct mlx5e_encap_entry *e)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5_esw_flow_attr slow_attr;
+       struct mlx5_flow_handle *rule;
+       struct mlx5_flow_spec *spec;
        struct mlx5e_tc_flow *flow;
+       int err;
 
        list_for_each_entry(flow, &e->flows, encap) {
-               if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
-                       struct mlx5_esw_flow_attr *attr = flow->esw_attr;
+               spec = &flow->esw_attr->parse_attr->spec;
 
-                       flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED;
-                       if (attr->mirror_count)
-                               mlx5_eswitch_del_offloaded_rule(esw, flow->rule[1], attr);
-                       mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr);
+               /* update from encap rule to slow path rule */
+               rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr);
+
+               if (IS_ERR(rule)) {
+                       err = PTR_ERR(rule);
+                       mlx5_core_warn(priv->mdev, "Failed to update slow path (encap) flow, %d\n",
+                                      err);
+                       continue;
                }
+
+               mlx5e_tc_unoffload_fdb_rules(esw, flow, flow->esw_attr);
+               flow->flags |= MLX5E_TC_FLOW_OFFLOADED; /* was unset when fast path rule removed */
+               flow->rule[0] = rule;
        }
 
        if (e->flags & MLX5_ENCAP_ENTRY_VALID) {
                e->flags &= ~MLX5_ENCAP_ENTRY_VALID;
-               mlx5_encap_dealloc(priv->mdev, e->encap_id);
+               mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
        }
 }
 
+static struct mlx5_fc *mlx5e_tc_get_counter(struct mlx5e_tc_flow *flow)
+{
+       if (flow->flags & MLX5E_TC_FLOW_ESWITCH)
+               return flow->esw_attr->counter;
+       else
+               return flow->nic_attr->counter;
+}
+
 void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
 {
        struct mlx5e_neigh *m_neigh = &nhe->m_neigh;
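
The getter above decouples stats readout from whichever rule handle happens to
be installed (fast path or slow path). A minimal sketch of the read side,
which is essentially what the neigh-used probe and mlx5e_stats_flower() are
switched to further down:

static void example_read_flow_stats(struct mlx5e_tc_flow *flow,
				    u64 *bytes, u64 *packets, u64 *lastuse)
{
	struct mlx5_fc *counter = mlx5e_tc_get_counter(flow);

	if (!counter)
		return;
	/* returns driver-cached values; no firmware query on this path */
	mlx5_fc_query_cached(counter, bytes, packets, lastuse);
}
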
@@ -1013,7 +1133,7 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe)
                        continue;
                list_for_each_entry(flow, &e->flows, encap) {
                        if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) {
-                               counter = mlx5_flow_rule_counter(flow->rule[0]);
+                               counter = mlx5e_tc_get_counter(flow);
                                mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
                                if (time_after((unsigned long)lastuse, nhe->reported_lastuse)) {
                                        neigh_used = true;
@@ -1053,7 +1173,7 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
                mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e);
 
                if (e->flags & MLX5_ENCAP_ENTRY_VALID)
-                       mlx5_encap_dealloc(priv->mdev, e->encap_id);
+                       mlx5_packet_reformat_dealloc(priv->mdev, e->encap_id);
 
                hash_del_rcu(&e->encap_hlist);
                kfree(e->encap_header);
@@ -1105,6 +1225,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                             struct mlx5_flow_spec *spec,
                             struct tc_cls_flower_offload *f)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1133,6 +1254,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                    MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
                        parse_vxlan_attr(spec, f);
                else {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "port isn't an offloaded vxlan udp dport");
                        netdev_warn(priv->netdev,
                                    "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
                        return -EOPNOTSUPP;
@@ -1149,6 +1272,8 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
                         udp_sport, ntohs(key->src));
        } else { /* udp dst port must be given */
 vxlan_match_offload_err:
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "IP tunnel decap offload supported only for vxlan, must set UDP dport");
                netdev_warn(priv->netdev,
                            "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
                return -EOPNOTSUPP;
@@ -1225,6 +1350,16 @@ vxlan_match_offload_err:
 
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, ttl_hoplimit, mask->ttl);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, ttl_hoplimit, key->ttl);
+
+               if (mask->ttl &&
+                   !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
+                                               ft_field_support.outer_ipv4_ttl)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on TTL is not supported");
+                       return -EOPNOTSUPP;
+               }
        }
 
        /* Enforce DMAC when offloading incoming tunneled flows.
@@ -1247,6 +1382,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                              struct tc_cls_flower_offload *f,
                              u8 *match_level)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
                                       outer_headers);
        void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
@@ -1277,6 +1413,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
              BIT(FLOW_DISSECTOR_KEY_TCP) |
              BIT(FLOW_DISSECTOR_KEY_IP)  |
              BIT(FLOW_DISSECTOR_KEY_ENC_IP))) {
+               NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
                netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n",
                            f->dissector->used_keys);
                return -EOPNOTSUPP;
@@ -1310,31 +1447,21 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                         inner_headers);
        }
 
-       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
-               struct flow_dissector_key_eth_addrs *key =
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
+               struct flow_dissector_key_basic *key =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
                                                  f->key);
-               struct flow_dissector_key_eth_addrs *mask =
+               struct flow_dissector_key_basic *mask =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
+                                                 FLOW_DISSECTOR_KEY_BASIC,
                                                  f->mask);
+               MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
+                        ntohs(mask->n_proto));
+               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
+                        ntohs(key->n_proto));
 
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                            dmac_47_16),
-                               mask->dst);
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                            dmac_47_16),
-                               key->dst);
-
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
-                                            smac_47_16),
-                               mask->src);
-               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
-                                            smac_47_16),
-                               key->src);
-
-               if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
+               if (mask->n_proto)
                        *match_level = MLX5_MATCH_L2;
        }
 
@@ -1368,9 +1495,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                        *match_level = MLX5_MATCH_L2;
                }
-       } else {
+       } else if (*match_level != MLX5_MATCH_NONE) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
                MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+               *match_level = MLX5_MATCH_L2;
        }
 
        if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
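
The hunk above is the change named in the subject line: a fully wildcarded
rule (no ethertype and no other L2 keys, so *match_level is still
MLX5_MATCH_NONE) no longer gets an implicit "no vlan tag present" match
written into it, and therefore matches tagged traffic too. A hypothetical
helper restating the new rule:

/* svlan_tag/cvlan_tag set in the match criteria with a zero match value
 * means "match only packets carrying no vlan tag"
 */
static void set_untagged_only_match(void *headers_c, u8 *match_level)
{
	if (*match_level == MLX5_MATCH_NONE)
		return; /* wildcarded rule: let tagged packets match too */

	MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
	MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
	*match_level = MLX5_MATCH_L2;
}
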
@@ -1408,21 +1536,31 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                }
        }
 
-       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
-               struct flow_dissector_key_basic *key =
+       if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
+               struct flow_dissector_key_eth_addrs *key =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->key);
-               struct flow_dissector_key_basic *mask =
+               struct flow_dissector_key_eth_addrs *mask =
                        skb_flow_dissector_target(f->dissector,
-                                                 FLOW_DISSECTOR_KEY_BASIC,
+                                                 FLOW_DISSECTOR_KEY_ETH_ADDRS,
                                                  f->mask);
-               MLX5_SET(fte_match_set_lyr_2_4, headers_c, ethertype,
-                        ntohs(mask->n_proto));
-               MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
-                        ntohs(key->n_proto));
 
-               if (mask->n_proto)
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                            dmac_47_16),
+                               mask->dst);
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                            dmac_47_16),
+                               key->dst);
+
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+                                            smac_47_16),
+                               mask->src);
+               ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+                                            smac_47_16),
+                               key->src);
+
+               if (!is_zero_ether_addr(mask->src) || !is_zero_ether_addr(mask->dst))
                        *match_level = MLX5_MATCH_L2;
        }
 
@@ -1553,8 +1691,11 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
 
                if (mask->ttl &&
                    !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev,
-                                               ft_field_support.outer_ipv4_ttl))
+                                               ft_field_support.outer_ipv4_ttl)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Matching on TTL is not supported");
                        return -EOPNOTSUPP;
+               }
 
                if (mask->tos || mask->ttl)
                        *match_level = MLX5_MATCH_L3;
@@ -1596,6 +1737,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                 udp_dport, ntohs(key->dst));
                        break;
                default:
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Only UDP and TCP transports are supported for L4 matching");
                        netdev_err(priv->netdev,
                                   "Only UDP and TCP transport are supported\n");
                        return -EINVAL;
@@ -1632,6 +1775,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                            struct mlx5_flow_spec *spec,
                            struct tc_cls_flower_offload *f)
 {
+       struct netlink_ext_ack *extack = f->common.extack;
        struct mlx5_core_dev *dev = priv->mdev;
        struct mlx5_eswitch *esw = dev->priv.eswitch;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1646,6 +1790,8 @@ static int parse_cls_flower(struct mlx5e_priv *priv,
                if (rep->vport != FDB_UPLINK_VPORT &&
                    (esw->offloads.inline_mode != MLX5_INLINE_MODE_NONE &&
                    esw->offloads.inline_mode < match_level)) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Flow is not offloaded due to min inline setting");
                        netdev_warn(priv->netdev,
                                    "Flow is not offloaded due to min inline setting, required %d actual %d\n",
                                    match_level, esw->offloads.inline_mode);
@@ -1747,7 +1893,8 @@ static struct mlx5_fields fields[] = {
  */
 static int offload_pedit_fields(struct pedit_headers *masks,
                                struct pedit_headers *vals,
-                               struct mlx5e_tc_flow_parse_attr *parse_attr)
+                               struct mlx5e_tc_flow_parse_attr *parse_attr,
+                               struct netlink_ext_ack *extack)
 {
        struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
        int i, action_size, nactions, max_actions, first, last, next_z;
@@ -1786,11 +1933,15 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                        continue;
 
                if (s_mask && a_mask) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "can't set and add to the same HW field");
                        printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field);
                        return -EOPNOTSUPP;
                }
 
                if (nactions == max_actions) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "too many pedit actions, can't offload");
                        printk(KERN_WARNING "mlx5: parsed %d pedit actions, can't do more\n", nactions);
                        return -EOPNOTSUPP;
                }
@@ -1823,6 +1974,8 @@ static int offload_pedit_fields(struct pedit_headers *masks,
                next_z = find_next_zero_bit(&mask, field_bsize, first);
                last  = find_last_bit(&mask, field_bsize);
                if (first < next_z && next_z < last) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "rewrite of few sub-fields isn't supported");
                        printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n",
                               mask);
                        return -EOPNOTSUPP;
@@ -1881,7 +2034,8 @@ static const struct pedit_headers zero_masks = {};
 
 static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                                 const struct tc_action *a, int namespace,
-                                struct mlx5e_tc_flow_parse_attr *parse_attr)
+                                struct mlx5e_tc_flow_parse_attr *parse_attr,
+                                struct netlink_ext_ack *extack)
 {
        struct pedit_headers masks[__PEDIT_CMD_MAX], vals[__PEDIT_CMD_MAX], *cmd_masks;
        int nkeys, i, err = -EOPNOTSUPP;
@@ -1899,12 +2053,13 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
                err = -EOPNOTSUPP; /* can't be all optimistic */
 
                if (htype == TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK) {
-                       netdev_warn(priv->netdev, "legacy pedit isn't offloaded\n");
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "legacy pedit isn't offloaded");
                        goto out_err;
                }
 
                if (cmd != TCA_PEDIT_KEY_EX_CMD_SET && cmd != TCA_PEDIT_KEY_EX_CMD_ADD) {
-                       netdev_warn(priv->netdev, "pedit cmd %d isn't offloaded\n", cmd);
+                       NL_SET_ERR_MSG_MOD(extack, "pedit cmd isn't offloaded");
                        goto out_err;
                }
 
@@ -1921,13 +2076,15 @@ static int parse_tc_pedit_action(struct mlx5e_priv *priv,
        if (err)
                goto out_err;
 
-       err = offload_pedit_fields(masks, vals, parse_attr);
+       err = offload_pedit_fields(masks, vals, parse_attr, extack);
        if (err < 0)
                goto out_dealloc_parsed_actions;
 
        for (cmd = 0; cmd < __PEDIT_CMD_MAX; cmd++) {
                cmd_masks = &masks[cmd];
                if (memcmp(cmd_masks, &zero_masks, sizeof(zero_masks))) {
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "attempt to offload an unsupported field");
                        netdev_warn(priv->netdev, "attempt to offload an unsupported field (cmd %d)\n", cmd);
                        print_hex_dump(KERN_WARNING, "mask: ", DUMP_PREFIX_ADDRESS,
                                       16, 1, cmd_masks, sizeof(zero_masks), true);
@@ -1944,19 +2101,26 @@ out_err:
        return err;
 }
 
-static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 update_flags)
+static bool csum_offload_supported(struct mlx5e_priv *priv,
+                                  u32 action,
+                                  u32 update_flags,
+                                  struct netlink_ext_ack *extack)
 {
        u32 prot_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR | TCA_CSUM_UPDATE_FLAG_TCP |
                         TCA_CSUM_UPDATE_FLAG_UDP;
 
        /*  The HW recalcs checksums only if re-writing headers */
        if (!(action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "TC csum action is only offloaded with pedit");
                netdev_warn(priv->netdev,
                            "TC csum action is only offloaded with pedit\n");
                return false;
        }
 
        if (update_flags & ~prot_flags) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload TC csum action for some header/s");
                netdev_warn(priv->netdev,
                            "can't offload TC csum action for some header/s - flags %#x\n",
                            update_flags);
@@ -1967,7 +2131,8 @@ static bool csum_offload_supported(struct mlx5e_priv *priv, u32 action, u32 upda
 }
 
 static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
-                                         struct tcf_exts *exts)
+                                         struct tcf_exts *exts,
+                                         struct netlink_ext_ack *extack)
 {
        const struct tc_action *a;
        bool modify_ip_header;
@@ -2005,6 +2170,8 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec,
        ip_proto = MLX5_GET(fte_match_set_lyr_2_4, headers_v, ip_protocol);
        if (modify_ip_header && ip_proto != IPPROTO_TCP &&
            ip_proto != IPPROTO_UDP && ip_proto != IPPROTO_ICMP) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "can't offload re-write of non TCP/UDP");
                pr_info("can't offload re-write of ip proto %d\n", ip_proto);
                return false;
        }
@@ -2016,7 +2183,8 @@ out_ok:
 static bool actions_match_supported(struct mlx5e_priv *priv,
                                    struct tcf_exts *exts,
                                    struct mlx5e_tc_flow_parse_attr *parse_attr,
-                                   struct mlx5e_tc_flow *flow)
+                                   struct mlx5e_tc_flow *flow,
+                                   struct netlink_ext_ack *extack)
 {
        u32 actions;
 
@@ -2030,7 +2198,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
                return false;
 
        if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
-               return modify_header_match_supported(&parse_attr->spec, exts);
+               return modify_header_match_supported(&parse_attr->spec, exts,
+                                                    extack);
 
        return true;
 }
@@ -2043,15 +2212,16 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
        fmdev = priv->mdev;
        pmdev = peer_priv->mdev;
 
-       mlx5_query_nic_vport_system_image_guid(fmdev, &fsystem_guid);
-       mlx5_query_nic_vport_system_image_guid(pmdev, &psystem_guid);
+       fsystem_guid = mlx5_query_nic_system_image_guid(fmdev);
+       psystem_guid = mlx5_query_nic_system_image_guid(pmdev);
 
        return (fsystem_guid == psystem_guid);
 }
 
 static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow)
+                               struct mlx5e_tc_flow *flow,
+                               struct netlink_ext_ack *extack)
 {
        struct mlx5_nic_flow_attr *attr = flow->nic_attr;
        const struct tc_action *a;
@@ -2075,7 +2245,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_pedit(a)) {
                        err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_KERNEL,
-                                                   parse_attr);
+                                                   parse_attr, extack);
                        if (err)
                                return err;
 
@@ -2086,7 +2256,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_csum(a)) {
                        if (csum_offload_supported(priv, action,
-                                                  tcf_csum_update_flags(a)))
+                                                  tcf_csum_update_flags(a),
+                                                  extack))
                                continue;
 
                        return -EOPNOTSUPP;
@@ -2102,6 +2273,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
                        } else {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "device is not on same HW, can't offload");
                                netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n",
                                            peer_dev->name);
                                return -EINVAL;
@@ -2113,8 +2286,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        u32 mark = tcf_skbedit_mark(a);
 
                        if (mark & ~MLX5E_TC_FLOW_ID_MASK) {
-                               netdev_warn(priv->netdev, "Bad flow mark - only 16 bit is supported: 0x%x\n",
-                                           mark);
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "Bad flow mark - only 16 bit is supported");
                                return -EINVAL;
                        }
 
@@ -2127,7 +2300,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        }
 
        attr->action = action;
-       if (!actions_match_supported(priv, exts, parse_attr, flow))
+       if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
        return 0;
@@ -2331,7 +2504,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                return -ENOMEM;
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                fl4.flowi4_proto = IPPROTO_UDP;
                fl4.fl4_dport = tun_key->tp_dst;
                break;
@@ -2375,7 +2548,7 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
        read_unlock_bh(&n->lock);
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                gen_vxlan_header_ipv4(out_dev, encap_header,
                                      ipv4_encap_size, e->h_dest, tos, ttl,
                                      fl4.daddr,
@@ -2395,8 +2568,10 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
                goto out;
        }
 
-       err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              ipv4_encap_size, encap_header, &e->encap_id);
+       err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+                                        ipv4_encap_size, encap_header,
+                                        MLX5_FLOW_NAMESPACE_FDB,
+                                        &e->encap_id);
        if (err)
                goto destroy_neigh_entry;
 
@@ -2440,7 +2615,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
                return -ENOMEM;
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                fl6.flowi6_proto = IPPROTO_UDP;
                fl6.fl6_dport = tun_key->tp_dst;
                break;
@@ -2484,7 +2659,7 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
        read_unlock_bh(&n->lock);
 
        switch (e->tunnel_type) {
-       case MLX5_HEADER_TYPE_VXLAN:
+       case MLX5_REFORMAT_TYPE_L2_TO_VXLAN:
                gen_vxlan_header_ipv6(out_dev, encap_header,
                                      ipv6_encap_size, e->h_dest, tos, ttl,
                                      &fl6.daddr,
@@ -2505,8 +2680,10 @@ static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
                goto out;
        }
 
-       err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
-                              ipv6_encap_size, encap_header, &e->encap_id);
+       err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type,
+                                        ipv6_encap_size, encap_header,
+                                        MLX5_FLOW_NAMESPACE_FDB,
+                                        &e->encap_id);
        if (err)
                goto destroy_neigh_entry;
 
@@ -2529,7 +2706,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
                              struct ip_tunnel_info *tun_info,
                              struct net_device *mirred_dev,
                              struct net_device **encap_dev,
-                             struct mlx5e_tc_flow *flow)
+                             struct mlx5e_tc_flow *flow,
+                             struct netlink_ext_ack *extack)
 {
        struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        unsigned short family = ip_tunnel_info_af(tun_info);
@@ -2547,6 +2725,8 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
        /* setting udp src port isn't supported */
        if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
 vxlan_encap_offload_err:
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "must set udp dst port and not set udp src port");
                netdev_warn(priv->netdev,
                            "must set udp dst port and not set udp src port\n");
                return -EOPNOTSUPP;
@@ -2554,8 +2734,10 @@ vxlan_encap_offload_err:
 
        if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) &&
            MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
-               tunnel_type = MLX5_HEADER_TYPE_VXLAN;
+               tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN;
        } else {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "port isn't an offloaded vxlan udp dport");
                netdev_warn(priv->netdev,
                            "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
                return -EOPNOTSUPP;
@@ -2660,8 +2842,10 @@ static int parse_tc_vlan_action(struct mlx5e_priv *priv,
 
 static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                struct mlx5e_tc_flow_parse_attr *parse_attr,
-                               struct mlx5e_tc_flow *flow)
+                               struct mlx5e_tc_flow *flow,
+                               struct netlink_ext_ack *extack)
 {
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5_esw_flow_attr *attr = flow->esw_attr;
        struct mlx5e_rep_priv *rpriv = priv->ppriv;
        struct ip_tunnel_info *info = NULL;
@@ -2686,7 +2870,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_pedit(a)) {
                        err = parse_tc_pedit_action(priv, a, MLX5_FLOW_NAMESPACE_FDB,
-                                                   parse_attr);
+                                                   parse_attr, extack);
                        if (err)
                                return err;
 
@@ -2697,7 +2881,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
 
                if (is_tcf_csum(a)) {
                        if (csum_offload_supported(priv, action,
-                                                  tcf_csum_update_flags(a)))
+                                                  tcf_csum_update_flags(a),
+                                                  extack))
                                continue;
 
                        return -EOPNOTSUPP;
@@ -2710,6 +2895,8 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        out_dev = tcf_mirred_dev(a);
 
                        if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "can't support more output ports, can't offload forwarding");
                                pr_err("can't support more than %d output ports, can't offload forwarding\n",
                                       attr->out_count);
                                return -EOPNOTSUPP;
@@ -2728,11 +2915,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                                parse_attr->mirred_ifindex = out_dev->ifindex;
                                parse_attr->tun_info = *info;
                                attr->parse_attr = parse_attr;
-                               action |= MLX5_FLOW_CONTEXT_ACTION_ENCAP |
+                               action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT |
                                          MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
                                          MLX5_FLOW_CONTEXT_ACTION_COUNT;
                                /* attr->out_rep is resolved when we handle encap */
                        } else {
+                               NL_SET_ERR_MSG_MOD(extack,
+                                                  "devices are not on same switch HW, can't offload forwarding");
                                pr_err("devices %s %s not on same switch HW, can't offload forwarding\n",
                                       priv->netdev->name, out_dev->name);
                                return -EINVAL;
@@ -2765,14 +2954,35 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
                        continue;
                }
 
+               if (is_tcf_gact_goto_chain(a)) {
+                       u32 dest_chain = tcf_gact_goto_chain_index(a);
+                       u32 max_chain = mlx5_eswitch_get_chain_range(esw);
+
+                       if (dest_chain <= attr->chain) {
+                               NL_SET_ERR_MSG(extack, "Goto earlier chain isn't supported");
+                               return -EOPNOTSUPP;
+                       }
+                       if (dest_chain > max_chain) {
+                               NL_SET_ERR_MSG(extack, "Requested destination chain is out of supported range");
+                               return -EOPNOTSUPP;
+                       }
+                       action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
+                                 MLX5_FLOW_CONTEXT_ACTION_COUNT;
+                       attr->dest_chain = dest_chain;
+
+                       continue;
+               }
+
                return -EINVAL;
        }
 
        attr->action = action;
-       if (!actions_match_supported(priv, exts, parse_attr, flow))
+       if (!actions_match_supported(priv, exts, parse_attr, flow, extack))
                return -EOPNOTSUPP;
 
        if (attr->out_count > 1 && !mlx5_esw_has_fwd_fdb(priv->mdev)) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "current firmware doesn't support split rule for port mirroring");
                netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n");
                return -EOPNOTSUPP;
        }
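
The goto-chain constraints enforced above, restated as a stand-alone check for
clarity (sketch; no such helper exists in the patch):

static int validate_goto_chain(struct mlx5_eswitch *esw,
			       u32 cur_chain, u32 dest_chain)
{
	/* only forward jumps can be offloaded */
	if (dest_chain <= cur_chain)
		return -EOPNOTSUPP;
	/* and only to chains the FDB actually exposes */
	if (dest_chain > mlx5_eswitch_get_chain_range(esw))
		return -EOPNOTSUPP;
	return 0;
}
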
@@ -2780,9 +2990,9 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
        return 0;
 }
 
-static void get_flags(int flags, u8 *flow_flags)
+static void get_flags(int flags, u16 *flow_flags)
 {
-       u8 __flow_flags = 0;
+       u16 __flow_flags = 0;
 
        if (flags & MLX5E_TC_INGRESS)
                __flow_flags |= MLX5E_TC_FLOW_INGRESS;
@@ -2811,31 +3021,15 @@ static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv)
                return &priv->fs.tc.ht;
 }
 
-int mlx5e_configure_flower(struct mlx5e_priv *priv,
-                          struct tc_cls_flower_offload *f, int flags)
+static int
+mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size,
+                struct tc_cls_flower_offload *f, u16 flow_flags,
+                struct mlx5e_tc_flow_parse_attr **__parse_attr,
+                struct mlx5e_tc_flow **__flow)
 {
-       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
        struct mlx5e_tc_flow_parse_attr *parse_attr;
-       struct rhashtable *tc_ht = get_tc_ht(priv);
        struct mlx5e_tc_flow *flow;
-       int attr_size, err = 0;
-       u8 flow_flags = 0;
-
-       get_flags(flags, &flow_flags);
-
-       flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
-       if (flow) {
-               netdev_warn_once(priv->netdev, "flow cookie %lx already exists, ignoring\n", f->cookie);
-               return 0;
-       }
-
-       if (esw && esw->mode == SRIOV_OFFLOADS) {
-               flow_flags |= MLX5E_TC_FLOW_ESWITCH;
-               attr_size  = sizeof(struct mlx5_esw_flow_attr);
-       } else {
-               flow_flags |= MLX5E_TC_FLOW_NIC;
-               attr_size  = sizeof(struct mlx5_nic_flow_attr);
-       }
+       int err;
 
        flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
        parse_attr = kvzalloc(sizeof(*parse_attr), GFP_KERNEL);
@@ -2849,45 +3043,161 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv,
        flow->priv = priv;
 
        err = parse_cls_flower(priv, flow, &parse_attr->spec, f);
-       if (err < 0)
+       if (err)
                goto err_free;
 
-       if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
-               err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow);
-               if (err < 0)
-                       goto err_free;
-               flow->rule[0] = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow);
-       } else {
-               err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow);
-               if (err < 0)
-                       goto err_free;
-               flow->rule[0] = mlx5e_tc_add_nic_flow(priv, parse_attr, flow);
-       }
+       *__flow = flow;
+       *__parse_attr = parse_attr;
 
-       if (IS_ERR(flow->rule[0])) {
-               err = PTR_ERR(flow->rule[0]);
-               if (err != -EAGAIN)
-                       goto err_free;
-       }
+       return 0;
+
+err_free:
+       kfree(flow);
+       kvfree(parse_attr);
+       return err;
+}
+
+static int
+mlx5e_add_fdb_flow(struct mlx5e_priv *priv,
+                  struct tc_cls_flower_offload *f,
+                  u16 flow_flags,
+                  struct mlx5e_tc_flow **__flow)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct mlx5e_tc_flow_parse_attr *parse_attr;
+       struct mlx5e_tc_flow *flow;
+       int attr_size, err;
+
+       flow_flags |= MLX5E_TC_FLOW_ESWITCH;
+       attr_size  = sizeof(struct mlx5_esw_flow_attr);
+       err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+                              &parse_attr, &flow);
+       if (err)
+               goto out;
+
+       flow->esw_attr->chain = f->common.chain_index;
+       flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16;
+       err = parse_tc_fdb_actions(priv, f->exts, parse_attr, flow, extack);
+       if (err)
+               goto err_free;
 
-       if (err != -EAGAIN)
-               flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+       err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack);
+       if (err)
+               goto err_free;
 
-       if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) ||
-           !(flow->esw_attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP))
+       if (!(flow->esw_attr->action &
+             MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT))
                kvfree(parse_attr);
 
-       err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
-       if (err) {
-               mlx5e_tc_del_flow(priv, flow);
-               kfree(flow);
-       }
+       *__flow = flow;
 
+       return 0;
+
+err_free:
+       kfree(flow);
+       kvfree(parse_attr);
+out:
        return err;
+}
+
+static int
+mlx5e_add_nic_flow(struct mlx5e_priv *priv,
+                  struct tc_cls_flower_offload *f,
+                  u16 flow_flags,
+                  struct mlx5e_tc_flow **__flow)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct mlx5e_tc_flow_parse_attr *parse_attr;
+       struct mlx5e_tc_flow *flow;
+       int attr_size, err;
+
+       /* multi-chain not supported for NIC rules */
+       if (!tc_cls_can_offload_and_chain0(priv->netdev, &f->common))
+               return -EOPNOTSUPP;
+
+       flow_flags |= MLX5E_TC_FLOW_NIC;
+       attr_size  = sizeof(struct mlx5_nic_flow_attr);
+       err = mlx5e_alloc_flow(priv, attr_size, f, flow_flags,
+                              &parse_attr, &flow);
+       if (err)
+               goto out;
+
+       err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack);
+       if (err)
+               goto err_free;
+
+       err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack);
+       if (err)
+               goto err_free;
+
+       flow->flags |= MLX5E_TC_FLOW_OFFLOADED;
+       kvfree(parse_attr);
+       *__flow = flow;
+
+       return 0;
 
 err_free:
+       kfree(flow);
        kvfree(parse_attr);
+out:
+       return err;
+}
+
+static int
+mlx5e_tc_add_flow(struct mlx5e_priv *priv,
+                 struct tc_cls_flower_offload *f,
+                 int flags,
+                 struct mlx5e_tc_flow **flow)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       u16 flow_flags;
+       int err;
+
+       get_flags(flags, &flow_flags);
+
+       if (!tc_can_offload_extack(priv->netdev, f->common.extack))
+               return -EOPNOTSUPP;
+
+       if (esw && esw->mode == SRIOV_OFFLOADS)
+               err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow);
+       else
+               err = mlx5e_add_nic_flow(priv, f, flow_flags, flow);
+
+       return err;
+}
+
+int mlx5e_configure_flower(struct mlx5e_priv *priv,
+                          struct tc_cls_flower_offload *f, int flags)
+{
+       struct netlink_ext_ack *extack = f->common.extack;
+       struct rhashtable *tc_ht = get_tc_ht(priv);
+       struct mlx5e_tc_flow *flow;
+       int err = 0;
+
+       flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params);
+       if (flow) {
+               NL_SET_ERR_MSG_MOD(extack,
+                                  "flow cookie already exists, ignoring");
+               netdev_warn_once(priv->netdev,
+                                "flow cookie %lx already exists, ignoring\n",
+                                f->cookie);
+               goto out;
+       }
+
+       err = mlx5e_tc_add_flow(priv, f, flags, &flow);
+       if (err)
+               goto out;
+
+       err = rhashtable_insert_fast(tc_ht, &flow->node, tc_ht_params);
+       if (err)
+               goto err_free;
+
+       return 0;
+
+err_free:
+       mlx5e_tc_del_flow(priv, flow);
        kfree(flow);
+out:
        return err;
 }
 
@@ -2938,7 +3248,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
        if (!(flow->flags & MLX5E_TC_FLOW_OFFLOADED))
                return 0;
 
-       counter = mlx5_flow_rule_counter(flow->rule[0]);
+       counter = mlx5e_tc_get_counter(flow);
        if (!counter)
                return 0;