diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index df2a0af854bbf6c7789585d090589c64d31bc6f6..d157d1b9cad645a082acf232646c20cf44670899 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -47,6 +47,7 @@
 #include <net/tc_act/tc_pedit.h>
 #include <net/tc_act/tc_csum.h>
 #include <net/tc_act/tc_mpls.h>
+#include <net/psample.h>
 #include <net/arp.h>
 #include <net/ipv6_stubs.h>
 #include <net/bareudp.h>
@@ -65,6 +66,7 @@
 #include "en/mod_hdr.h"
 #include "en/tc_priv.h"
 #include "en/tc_tun_encap.h"
+#include "esw/sample.h"
 #include "lib/devcom.h"
 #include "lib/geneve.h"
 #include "lib/fs_chains.h"
@@ -221,6 +223,25 @@ get_ct_priv(struct mlx5e_priv *priv)
        return priv->fs.tc.ct;
 }
 
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+static struct mlx5_esw_psample *
+get_sample_priv(struct mlx5e_priv *priv)
+{
+       struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+       struct mlx5_rep_uplink_priv *uplink_priv;
+       struct mlx5e_rep_priv *uplink_rpriv;
+
+       if (is_mdev_switchdev_mode(priv->mdev)) {
+               uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH);
+               uplink_priv = &uplink_rpriv->uplink_priv;
+
+               return uplink_priv->esw_psample;
+       }
+
+       return NULL;
+}
+#endif
+
 struct mlx5_flow_handle *
 mlx5_tc_rule_insert(struct mlx5e_priv *priv,
                    struct mlx5_flow_spec *spec,
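
get_sample_priv() above is only built when CONFIG_MLX5_TC_SAMPLE is enabled, which is why every caller later in this patch carries the same #if guard. A hedged sketch of the alternative (a no-op stub for the disabled case; this is not what the patch does, it is shown only to explain the guards at the call sites):

#if !IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
static inline struct mlx5_esw_psample *
get_sample_priv(struct mlx5e_priv *priv)
{
        return NULL;    /* sampling support compiled out */
}
#endif

A NULL-returning stub would let the SAMPLE branches compile unconditionally, at the cost of keeping dead code paths that the guarded version removes entirely.
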
@@ -445,11 +466,15 @@ static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp)
        mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn);
 }
 
-static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
+static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
 {
-       u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE], rqn;
        struct mlx5e_priv *priv = hp->func_priv;
        int i, ix, sz = MLX5E_INDIR_RQT_SIZE;
+       u32 *indirection_rqt, rqn;
+
+       indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL);
+       if (!indirection_rqt)
+               return -ENOMEM;
 
        mlx5e_build_default_indir_rqt(indirection_rqt, sz,
                                      hp->num_channels);
@@ -462,6 +487,9 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc)
                rqn = hp->pair->rqn[ix];
                MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
        }
+
+       kfree(indirection_rqt);
+       return 0;
 }
 
 static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
@@ -482,12 +510,15 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
        MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
        MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
 
-       mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+       err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc);
+       if (err)
+               goto out;
 
        err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn);
        if (!err)
                hp->indir_rqt.enabled = true;
 
+out:
        kvfree(in);
        return err;
 }
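
The two hairpin hunks above move the RQT indirection table off the kernel stack (MLX5E_INDIR_RQT_SIZE u32 entries is a large on-stack array) onto the heap, which in turn makes the fill helper fallible. A minimal self-contained sketch of the pattern; build_indirection_table() and the modulo fill are hypothetical stand-ins for mlx5e_hairpin_fill_rqt_rqns() and mlx5e_build_default_indir_rqt():

#include <linux/slab.h>
#include <linux/types.h>

static int build_indirection_table(unsigned int num_channels, unsigned int sz)
{
        unsigned int i;
        u32 *tbl;

        /* heap allocation instead of "u32 tbl[sz]" on the stack */
        tbl = kcalloc(sz, sizeof(*tbl), GFP_KERNEL);
        if (!tbl)
                return -ENOMEM;

        for (i = 0; i < sz; i++)        /* stand-in for the default indirection fill */
                tbl[i] = i % num_channels;

        /* ... consume tbl, e.g. program rq_num[i] into the RQT context ... */

        kfree(tbl);
        return 0;
}

The caller must now check the return value, which is exactly the err/goto added to mlx5e_hairpin_create_indirect_rqt() above.
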
@@ -896,7 +927,7 @@ mlx5e_add_offloaded_nic_rule(struct mlx5e_priv *priv,
                        if (IS_ERR(dest[dest_ix].ft))
                                return ERR_CAST(dest[dest_ix].ft);
                } else {
-                       dest[dest_ix].ft = priv->fs.vlan.ft.t;
+                       dest[dest_ix].ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
                }
                dest_ix++;
        }
@@ -1077,19 +1108,27 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw,
        if (flow_flag_test(flow, CT)) {
                mod_hdr_acts = &attr->parse_attr->mod_hdr_acts;
 
-               return mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
+               rule = mlx5_tc_ct_flow_offload(get_ct_priv(flow->priv),
                                               flow, spec, attr,
                                               mod_hdr_acts);
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+       } else if (flow_flag_test(flow, SAMPLE)) {
+               rule = mlx5_esw_sample_offload(get_sample_priv(flow->priv), spec, attr);
+#endif
+       } else {
+               rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
        }
 
-       rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr);
        if (IS_ERR(rule))
                return rule;
 
        if (attr->esw_attr->split_count) {
                flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr);
                if (IS_ERR(flow->rule[1])) {
-                       mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
+                       if (flow_flag_test(flow, CT))
+                               mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
+                       else
+                               mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
                        return flow->rule[1];
                }
        }
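
Note that the split-rule error path above now tears rule[0] down with whichever method installed it (CT offload vs. plain eswitch rule). A hedged sketch of that pairing pulled into a helper; del_offloaded_rule() is hypothetical and not part of the patch, and the SAMPLE case is omitted here, mirroring the error path above:

static void del_offloaded_rule(struct mlx5_eswitch *esw,
                               struct mlx5e_tc_flow *flow,
                               struct mlx5_flow_handle *rule,
                               struct mlx5_flow_attr *attr)
{
        if (flow_flag_test(flow, CT))
                mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr);
        else
                mlx5_eswitch_del_offloaded_rule(esw, rule, attr);
}
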
@@ -1111,6 +1150,13 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw,
                return;
        }
 
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+       if (flow_flag_test(flow, SAMPLE)) {
+               mlx5_esw_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr);
+               return;
+       }
+#endif
+
        if (attr->esw_attr->split_count)
                mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr);
 
@@ -1467,6 +1513,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
        if (flow_flag_test(flow, L3_TO_L2_DECAP))
                mlx5e_detach_decap(priv, flow);
 
+       kfree(flow->attr->esw_attr->sample);
        kfree(flow->attr);
 }
 
@@ -1947,6 +1994,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                                    misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
                                    misc_parameters);
+       void *misc_c_3 = MLX5_ADDR_OF(fte_match_param, spec->match_criteria,
+                                   misc_parameters_3);
+       void *misc_v_3 = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+                                   misc_parameters_3);
        struct flow_rule *rule = flow_cls_offload_flow_rule(f);
        struct flow_dissector *dissector = rule->match.dissector;
        u16 addr_type = 0;
@@ -1976,6 +2027,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
              BIT(FLOW_DISSECTOR_KEY_CT) |
              BIT(FLOW_DISSECTOR_KEY_ENC_IP) |
              BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) |
+             BIT(FLOW_DISSECTOR_KEY_ICMP) |
              BIT(FLOW_DISSECTOR_KEY_MPLS))) {
                NL_SET_ERR_MSG_MOD(extack, "Unsupported key");
                netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n",
@@ -2295,7 +2347,49 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
                if (match.mask->flags)
                        *match_level = MLX5_MATCH_L4;
        }
+       if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
+               struct flow_match_icmp match;
 
+               flow_rule_match_icmp(rule, &match);
+               switch (ip_proto) {
+               case IPPROTO_ICMP:
+                       if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+                             MLX5_FLEX_PROTO_ICMP))
+                               return -EOPNOTSUPP;
+                       MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_type,
+                                match.mask->type);
+                       MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_type,
+                                match.key->type);
+                       MLX5_SET(fte_match_set_misc3, misc_c_3, icmp_code,
+                                match.mask->code);
+                       MLX5_SET(fte_match_set_misc3, misc_v_3, icmp_code,
+                                match.key->code);
+                       break;
+               case IPPROTO_ICMPV6:
+                       if (!(MLX5_CAP_GEN(priv->mdev, flex_parser_protocols) &
+                             MLX5_FLEX_PROTO_ICMPV6))
+                               return -EOPNOTSUPP;
+                       MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_type,
+                                match.mask->type);
+                       MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_type,
+                                match.key->type);
+                       MLX5_SET(fte_match_set_misc3, misc_c_3, icmpv6_code,
+                                match.mask->code);
+                       MLX5_SET(fte_match_set_misc3, misc_v_3, icmpv6_code,
+                                match.key->code);
+                       break;
+               default:
+                       NL_SET_ERR_MSG_MOD(extack,
+                                          "Code and type matching only with ICMP and ICMPv6");
+                       netdev_err(priv->netdev,
+                                  "Code and type matching only with ICMP and ICMPv6\n");
+                       return -EINVAL;
+               }
+               if (match.mask->code || match.mask->type) {
+                       *match_level = MLX5_MATCH_L4;
+                       spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3;
+               }
+       }
        /* Currently supported only for MPLS over UDP */
        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) &&
            !netif_is_bareudp(filter_dev)) {
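
The ICMP block above gates type/code matching on the flex parser capability: the corresponding MLX5_FLEX_PROTO_* bit must be set in flex_parser_protocols before the misc_parameters_3 fields may be used. A hedged sketch of that check factored into a helper; mlx5_icmp_match_supported() is hypothetical, only the capability names come from the hunk:

static bool mlx5_icmp_match_supported(struct mlx5_core_dev *mdev, u8 ip_proto)
{
        u32 protos = MLX5_CAP_GEN(mdev, flex_parser_protocols);

        if (ip_proto == IPPROTO_ICMP)
                return protos & MLX5_FLEX_PROTO_ICMP;
        if (ip_proto == IPPROTO_ICMPV6)
                return protos & MLX5_FLEX_PROTO_ICMPV6;
        return false;   /* type/code matching is ICMP(v6)-only */
}
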
@@ -3011,7 +3105,8 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
        actions = flow->attr->action;
 
        if (mlx5e_is_eswitch_flow(flow)) {
-               if (flow->attr->esw_attr->split_count && ct_flow) {
+               if (flow->attr->esw_attr->split_count && ct_flow &&
+                   !MLX5_CAP_GEN(flow->attr->esw_attr->in_mdev, reg_c_preserve)) {
                        /* All registers used by ct are cleared when using
                         * split rules.
                         */
@@ -3049,6 +3144,13 @@ static bool same_hw_devs(struct mlx5e_priv *priv, struct mlx5e_priv *peer_priv)
        return (fsystem_guid == psystem_guid);
 }
 
+static bool same_vf_reps(struct mlx5e_priv *priv,
+                        struct net_device *out_dev)
+{
+       return mlx5e_eswitch_vf_rep(priv->netdev) &&
+              priv->netdev == out_dev;
+}
+
 static int add_vlan_rewrite_action(struct mlx5e_priv *priv, int namespace,
                                   const struct flow_action_entry *act,
                                   struct mlx5e_tc_flow_parse_attr *parse_attr,
@@ -3558,6 +3660,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
        bool ft_flow = mlx5e_is_ft_flow(flow);
        const struct flow_action_entry *act;
        struct mlx5_esw_flow_attr *esw_attr;
+       struct mlx5_sample_attr sample = {};
        bool encap = false, decap = false;
        u32 action = attr->action;
        int err, i, if_count = 0;
@@ -3734,6 +3837,12 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                                        return -EOPNOTSUPP;
                                }
 
+                               if (same_vf_reps(priv, out_dev)) {
+                                       NL_SET_ERR_MSG_MOD(extack,
+                                                          "can't forward from a VF to itself");
+                                       return -EOPNOTSUPP;
+                               }
+
                                out_priv = netdev_priv(out_dev);
                                rpriv = out_priv->ppriv;
                                esw_attr->dests[esw_attr->out_count].rep = rpriv->rep;
@@ -3806,11 +3915,27 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                        attr->dest_chain = act->chain_index;
                        break;
                case FLOW_ACTION_CT:
+                       if (flow_flag_test(flow, SAMPLE)) {
+                               NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
+                               return -EOPNOTSUPP;
+                       }
                        err = mlx5_tc_ct_parse_action(get_ct_priv(priv), attr, act, extack);
                        if (err)
                                return err;
 
                        flow_flag_set(flow, CT);
+                       esw_attr->split_count = esw_attr->out_count;
+                       break;
+               case FLOW_ACTION_SAMPLE:
+                       if (flow_flag_test(flow, CT)) {
+                               NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is not supported");
+                               return -EOPNOTSUPP;
+                       }
+                       sample.rate = act->sample.rate;
+                       sample.group_num = act->sample.psample_group->group_num;
+                       if (act->sample.truncate)
+                               sample.trunc_size = act->sample.trunc_size;
+                       flow_flag_set(flow, SAMPLE);
                        break;
                default:
                        NL_SET_ERR_MSG_MOD(extack, "The offload action is not supported");
@@ -3873,11 +3998,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                        return -EOPNOTSUPP;
                }
 
-               if (attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) {
-                       NL_SET_ERR_MSG_MOD(extack,
-                                          "Mirroring goto chain rules isn't supported");
-                       return -EOPNOTSUPP;
-               }
                attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
        }
 
@@ -3895,6 +4015,16 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
                return -EOPNOTSUPP;
        }
 
+       /* Allocate sample attribute only when there is a sample action and
+        * no errors after parsing.
+        */
+       if (flow_flag_test(flow, SAMPLE)) {
+               esw_attr->sample = kzalloc(sizeof(*esw_attr->sample), GFP_KERNEL);
+               if (!esw_attr->sample)
+                       return -ENOMEM;
+               *esw_attr->sample = sample;
+       }
+
        return 0;
 }
 
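
The sample attribute is collected in a stack-local struct while actions are parsed and copied into a kzalloc'd copy only once parsing has succeeded; the matching kfree() is in mlx5e_tc_del_fdb_flow() above. The fields below are reconstructed from the assignments in this file (the authoritative definition is in esw/sample.h, so the actual layout may differ):

struct mlx5_sample_attr {
        u32 group_num;          /* psample group the sampled packets are reported to */
        u32 rate;               /* sample one out of every 'rate' packets */
        u32 trunc_size;         /* truncate reported packets to this size, 0 = no truncation */
};
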
@@ -4297,6 +4427,11 @@ int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv,
        struct mlx5e_tc_flow *flow;
        int err = 0;
 
+       if (!mlx5_esw_hold(priv->mdev))
+               return -EAGAIN;
+
+       mlx5_esw_get(priv->mdev);
+
        rcu_read_lock();
        flow = rhashtable_lookup(tc_ht, &f->cookie, tc_ht_params);
        if (flow) {
@@ -4334,11 +4469,14 @@ rcu_unlock:
        if (err)
                goto err_free;
 
+       mlx5_esw_release(priv->mdev);
        return 0;
 
 err_free:
        mlx5e_flow_put(priv, flow);
 out:
+       mlx5_esw_put(priv->mdev);
+       mlx5_esw_release(priv->mdev);
        return err;
 }
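
The hold/get pair added to mlx5e_configure_flower() and the put in mlx5e_delete_flower() below form a reference discipline on the eswitch: the hold only brackets the configure call itself, while get/put track the lifetime of the offloaded flow. A hedged sketch of that discipline as used in this file; install_flow() is a hypothetical placeholder and the real locking lives in the eswitch code:

static int offload_one_flow(struct mlx5_core_dev *mdev)
{
        int err;

        if (!mlx5_esw_hold(mdev))       /* eswitch busy, e.g. changing modes */
                return -EAGAIN;
        mlx5_esw_get(mdev);             /* count this flow against the eswitch */

        err = install_flow(mdev);       /* hypothetical */
        if (err)
                mlx5_esw_put(mdev);     /* nothing installed, drop the count */

        mlx5_esw_release(mdev);         /* the hold only spans this call */
        return err;
        /* on success, the matching mlx5_esw_put() runs when the flow is
         * deleted (see mlx5e_delete_flower below)
         */
}
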
 
@@ -4378,6 +4516,7 @@ int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv,
        trace_mlx5e_delete_flower(f);
        mlx5e_flow_put(priv, flow);
 
+       mlx5_esw_put(priv->mdev);
        return 0;
 
 errout:
@@ -4513,6 +4652,10 @@ static int scan_tc_matchall_fdb_actions(struct mlx5e_priv *priv,
        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_POLICE:
+                       if (act->police.rate_pkt_ps) {
+                               NL_SET_ERR_MSG_MOD(extack, "QoS offload not support packets per second");
+                               return -EOPNOTSUPP;
+                       }
                        err = apply_police_params(priv, act->police.rate_bytes_ps, extack);
                        if (err)
                                return err;
@@ -4647,6 +4790,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
        struct mlx5e_tc_table *tc = &priv->fs.tc;
        struct mlx5_core_dev *dev = priv->mdev;
+       struct mapping_ctx *chains_mapping;
        struct mlx5_chains_attr attr = {};
        int err;
 
@@ -4661,15 +4805,22 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 
        lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key);
 
-       if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) {
+       chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj),
+                                       MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
+       if (IS_ERR(chains_mapping)) {
+               err = PTR_ERR(chains_mapping);
+               goto err_mapping;
+       }
+       tc->mapping = chains_mapping;
+
+       if (MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level))
                attr.flags = MLX5_CHAINS_AND_PRIOS_SUPPORTED |
                        MLX5_CHAINS_IGNORE_FLOW_LEVEL_SUPPORTED;
-               attr.max_restore_tag = MLX5E_TC_TABLE_CHAIN_TAG_MASK;
-       }
        attr.ns = MLX5_FLOW_NAMESPACE_KERNEL;
        attr.max_ft_sz = mlx5e_tc_nic_get_ft_size(dev);
        attr.max_grp_num = MLX5E_TC_TABLE_NUM_GROUPS;
-       attr.default_ft = priv->fs.vlan.ft.t;
+       attr.default_ft = mlx5e_vlan_get_flowtable(priv->fs.vlan);
+       attr.mapping = chains_mapping;
 
        tc->chains = mlx5_chains_create(dev, &attr);
        if (IS_ERR(tc->chains)) {
@@ -4679,10 +4830,6 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 
        tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr,
                                 MLX5_FLOW_NAMESPACE_KERNEL);
-       if (IS_ERR(tc->ct)) {
-               err = PTR_ERR(tc->ct);
-               goto err_ct;
-       }
 
        tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
        err = register_netdevice_notifier_dev_net(priv->netdev,
@@ -4698,9 +4845,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 
 err_reg:
        mlx5_tc_ct_clean(tc->ct);
-err_ct:
        mlx5_chains_destroy(tc->chains);
 err_chains:
+       mapping_destroy(chains_mapping);
+err_mapping:
        rhashtable_destroy(&tc->ht);
        return err;
 }
@@ -4735,6 +4883,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
        mutex_destroy(&tc->t_lock);
 
        mlx5_tc_ct_clean(tc->ct);
+       mapping_destroy(tc->mapping);
        mlx5_chains_destroy(tc->chains);
 }
 
@@ -4757,8 +4906,10 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht)
                                               esw_chains(esw),
                                               &esw->offloads.mod_hdr,
                                               MLX5_FLOW_NAMESPACE_FDB);
-       if (IS_ERR(uplink_priv->ct_priv))
-               goto err_ct;
+
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+       uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev));
+#endif
 
        mapping = mapping_create(sizeof(struct tunnel_match_key),
                                 TUNNEL_INFO_BITS_MASK, true);
@@ -4797,8 +4948,10 @@ err_ht_init:
 err_enc_opts_mapping:
        mapping_destroy(uplink_priv->tunnel_mapping);
 err_tun_mapping:
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+       mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
+#endif
        mlx5_tc_ct_clean(uplink_priv->ct_priv);
-err_ct:
        netdev_warn(priv->netdev,
                    "Failed to initialize tc (eswitch), err: %d", err);
        return err;
@@ -4816,6 +4969,9 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht)
        mapping_destroy(uplink_priv->tunnel_enc_opts_mapping);
        mapping_destroy(uplink_priv->tunnel_mapping);
 
+#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE)
+       mlx5_esw_sample_cleanup(uplink_priv->esw_psample);
+#endif
        mlx5_tc_ct_clean(uplink_priv->ct_priv);
 }
 
@@ -4871,9 +5027,17 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
 int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
                            void *cb_priv)
 {
-       unsigned long flags = MLX5_TC_FLAG(INGRESS) | MLX5_TC_FLAG(NIC_OFFLOAD);
+       unsigned long flags = MLX5_TC_FLAG(INGRESS);
        struct mlx5e_priv *priv = cb_priv;
 
+       if (!priv->netdev || !netif_device_present(priv->netdev))
+               return -EOPNOTSUPP;
+
+       if (mlx5e_is_uplink_rep(priv))
+               flags |= MLX5_TC_FLAG(ESW_OFFLOAD);
+       else
+               flags |= MLX5_TC_FLAG(NIC_OFFLOAD);
+
        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return mlx5e_setup_tc_cls_flower(priv, type_data, flags);
@@ -4889,6 +5053,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
        u32 chain = 0, chain_tag, reg_b, zone_restore_id;
        struct mlx5e_priv *priv = netdev_priv(skb->dev);
        struct mlx5e_tc_table *tc = &priv->fs.tc;
+       struct mlx5_mapped_obj mapped_obj;
        struct tc_skb_ext *tc_skb_ext;
        int err;
 
@@ -4896,7 +5061,7 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
 
        chain_tag = reg_b & MLX5E_TC_TABLE_CHAIN_TAG_MASK;
 
-       err = mlx5_get_chain_for_tag(nic_chains(priv), chain_tag, &chain);
+       err = mapping_find(tc->mapping, chain_tag, &mapped_obj);
        if (err) {
                netdev_dbg(priv->netdev,
                           "Couldn't find chain for chain tag: %d, err: %d\n",
@@ -4904,7 +5069,8 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
                return false;
        }
 
-       if (chain) {
+       if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) {
+               chain = mapped_obj.chain;
                tc_skb_ext = skb_ext_add(skb, TC_SKB_EXT);
                if (WARN_ON(!tc_skb_ext))
                        return false;
@@ -4917,6 +5083,9 @@ bool mlx5e_tc_update_skb(struct mlx5_cqe64 *cqe,
                if (!mlx5e_tc_ct_restore_flow(tc->ct, skb,
                                              zone_restore_id))
                        return false;
+       } else {
+               netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type);
+               return false;
        }
 #endif /* CONFIG_NET_TC_SKB_EXT */
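
The chain restore path above now resolves the chain tag through the generic mapping facility instead of the chains-specific tag lookup. A hedged sketch of the lifecycle as it appears in this file, with error handling trimmed; the producer side (the fs_chains code that stores a struct mlx5_mapped_obj under each chain tag via attr.mapping) is represented only by a comment:

static int chain_tag_mapping_demo(u32 chain_tag)
{
        struct mlx5_mapped_obj mapped_obj;
        struct mapping_ctx *ctx;
        int err;

        ctx = mapping_create(sizeof(struct mlx5_mapped_obj),
                             MLX5E_TC_TABLE_CHAIN_TAG_MASK, true);
        if (IS_ERR(ctx))
                return PTR_ERR(ctx);

        /* ... fs_chains stores an MLX5_MAPPED_OBJ_CHAIN entry per chain ... */

        err = mapping_find(ctx, chain_tag, &mapped_obj);
        if (!err && mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN)
                pr_debug("tag %u -> chain %u\n", chain_tag, mapped_obj.chain);

        mapping_destroy(ctx);
        return err;
}

Keying the lookup on a typed mapped object rather than a bare chain number is what allows the final else branch above to reject tags that map to something other than a chain.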