net/mlx5: Configure cache line size for start and end padding
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020cd9e1a21b28643621122695cd06dd..1b7fe43ab22b2767fb226360453abceaf9d38621 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,6 +31,7 @@
  */
 
 #include <net/tc_act/tc_gact.h>
+#include <linux/crash_dump.h>
 #include <net/pkt_cls.h>
 #include <linux/mlx5/fs.h>
 #include <net/vxlan.h>
@@ -83,16 +84,20 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
        priv->params.rq_wq_type = rq_type;
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
-               priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+               priv->params.log_rq_size = is_kdump_kernel() ?
+                       MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
+                       MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
                priv->params.mpwqe_log_stride_sz =
                        MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
-                       MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
-                       MLX5_MPWRQ_LOG_STRIDE_SIZE;
+                       MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(priv->mdev) :
+                       MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(priv->mdev);
                priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        priv->params.mpwqe_log_stride_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
-               priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+               priv->params.log_rq_size = is_kdump_kernel() ?
+                       MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+                       MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        }
        priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
                                               BIT(priv->params.log_rq_size));
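
Note on the hunk above: the fixed stride-size constants become per-device macros taking mdev, which lets the stride respect the device's cache line (per this commit's subject, a 128-byte cache line raises the minimum log stride), and the RQ log sizes fall back to their minimum defines under is_kdump_kernel(), where the crash kernel's memory budget is tight. The stride arithmetic itself is fixed: a multi-packet WQE of 2^MLX5_MPWRQ_LOG_WQE_SZ bytes is carved into strides of 2^mpwqe_log_stride_sz bytes. A minimal sketch of that relation, assuming the typical (not guaranteed) values of 18 for the log WQE size and 6 for the log stride size:

        /* Illustrative restatement of the relation set in the hunk above. */
        static inline u32 mpwqe_num_strides(u8 log_wqe_sz, u8 log_stride_sz)
        {
                return 1U << (log_wqe_sz - log_stride_sz); /* e.g. 2^(18-6) = 4096 */
        }
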
@@ -268,6 +273,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+               out = pstats->phy_statistical_counters;
+               MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+               mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+       }
+
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
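
The added read of the physical-layer statistical group is gated on MLX5_CAP_PCAM_FEATURE() because older firmware does not implement that PPCNT group; the hunk reuses the single "in" mailbox and simply re-points "out" at a different counter buffer per group, the same pattern as the surrounding reads.
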
@@ -291,11 +302,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
                                      &qcnt->rx_out_of_buffer);
 }
 
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+       struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+       void *out;
+       u32 *in;
+
+       if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+               return;
+
+       in = mlx5_vzalloc(sz);
+       if (!in)
+               return;
+
+       out = pcie_stats->pcie_perf_counters;
+       MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+       kvfree(in);
+}
+
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
-       mlx5e_update_q_counter(priv);
-       mlx5e_update_vport_counters(priv);
+       mlx5e_update_pcie_counters(priv);
        mlx5e_update_pport_counters(priv);
+       mlx5e_update_vport_counters(priv);
+       mlx5e_update_q_counter(priv);
        mlx5e_update_sw_counters(priv);
 }
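
The new mlx5e_update_pcie_counters() follows the driver's usual mailbox shape and is likewise capability-gated (MCAM's pcie_performance_group). Note the allocator pairing: mlx5_vzalloc() may hand back kmalloc- or vmalloc-backed memory, so it must be released with kvfree(), as done above. The reordering in mlx5e_update_stats() groups the firmware-queried counters (PCIe, port, vport, Q) ahead of the software counters; the precise motivation is not visible in this hunk.
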
 
@@ -317,6 +351,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
 {
        struct mlx5e_priv *priv = vpriv;
+       struct ptp_clock_event ptp_event;
+       struct mlx5_eqe *eqe = NULL;
 
        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;
@@ -326,7 +362,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
-
+       case MLX5_DEV_EVENT_PPS:
+               eqe = (struct mlx5_eqe *)param;
+               ptp_event.type = PTP_CLOCK_EXTTS;
+               ptp_event.index = eqe->data.pps.pin;
+               ptp_event.timestamp =
+                       timecounter_cyc2time(&priv->tstamp.clock,
+                                            be64_to_cpu(eqe->data.pps.time_stamp));
+               mlx5e_pps_event_handler(vpriv, &ptp_event);
+               break;
        default:
                break;
        }
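
The new MLX5_DEV_EVENT_PPS arm turns a pulse-per-second EQE into a PTP external-timestamp event: the EQE carries a raw cycle count from the NIC's free-running clock, and timecounter_cyc2time() converts it to nanoseconds in the driver's timecounter domain before mlx5e_pps_event_handler() feeds it to the PTP core. A hedged user-space sketch of consuming such events (the device node, pin index, and omitted error handling are illustrative assumptions):

        #include <fcntl.h>
        #include <linux/ptp_clock.h>
        #include <sys/ioctl.h>
        #include <unistd.h>

        int main(void)
        {
                int fd = open("/dev/ptp0", O_RDWR);  /* assumed device node */
                struct ptp_extts_request req = {
                        .index = 0,                  /* pin, cf. eqe->data.pps.pin */
                        .flags = PTP_ENABLE_FEATURE,
                };
                struct ptp_extts_event ev;

                ioctl(fd, PTP_EXTTS_REQUEST, &req);  /* arm external timestamping */
                read(fd, &ev, sizeof(ev));           /* blocks until a PPS edge */
                /* ev.t.sec / ev.t.nsec carry the stamp computed in the hunk above */
                return 0;
        }
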
@@ -343,9 +387,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
 static inline int mlx5e_get_wqe_mtt_sz(void)
 {
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -372,7 +413,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
        cseg->imm       = rq->mkey_be;
 
        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
-       ucseg->klm_octowords =
+       ucseg->xlt_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
@@ -534,9 +575,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                goto err_rq_wq_destroy;
        }
 
-       rq->buff.map_dir = DMA_FROM_DEVICE;
-       if (rq->xdp_prog)
+       if (rq->xdp_prog) {
                rq->buff.map_dir = DMA_BIDIRECTIONAL;
+               rq->rx_headroom = XDP_PACKET_HEADROOM;
+       } else {
+               rq->buff.map_dir = DMA_FROM_DEVICE;
+               rq->rx_headroom = MLX5_RX_HEADROOM;
+       }
 
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -586,7 +631,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                byte_count = rq->buff.wqe_sz;
 
                /* calc the required page order */
-               frag_sz = MLX5_RX_HEADROOM +
+               frag_sz = rq->rx_headroom +
                          byte_count /* packet data */ +
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                frag_sz = SKB_DATA_ALIGN(frag_sz);
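
With an XDP program attached, the buffer now reserves XDP_PACKET_HEADROOM (256 bytes) ahead of the packet instead of the smaller MLX5_RX_HEADROOM, and pages are mapped DMA_BIDIRECTIONAL so the program may rewrite the frame (e.g. for XDP_TX). Roughly, for a standard MTU: 256 B of headroom + ~1536 B of packet data + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) (about 320 B on 64-bit, as an illustrative figure) still lands well inside an order-0 page.
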
@@ -967,7 +1012,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->tc        = tc;
 
-       err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
+       err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
        if (err)
                return err;
 
@@ -979,12 +1024,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
                goto err_unmap_free_uar;
 
        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
-       if (sq->uar.bf_map) {
+       if (sq->bfreg.wc)
                set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-               sq->uar_map = sq->uar.bf_map;
-       } else {
-               sq->uar_map = sq->uar.map;
-       }
+
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;
        sq->min_inline_mode =
@@ -1012,7 +1054,7 @@ err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);
 
 err_unmap_free_uar:
-       mlx5_unmap_free_uar(mdev, &sq->uar);
+       mlx5_free_bfreg(mdev, &sq->bfreg);
 
        return err;
 }
@@ -1024,7 +1066,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
 
        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
-       mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+       mlx5_free_bfreg(priv->mdev, &sq->bfreg);
 }
 
 static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
@@ -1058,7 +1100,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
        MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
 
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
-       MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
+       MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
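
These SQ hunks convert the doorbell from the hand-rolled mlx5_alloc_map_uar()/mlx5_unmap_free_uar() pair to the consolidated bfreg API: mlx5_alloc_bfreg() fills a struct mlx5_sq_bfreg whose .wc flag reports whether the mapping is write-combining (a prerequisite for BlueFlame, hence the MLX5E_SQ_STATE_BF_ENABLE gating above) and whose .index is the UAR page id programmed into the SQ context above; mlx5_free_bfreg() releases it on both the error and teardown paths.
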
@@ -1216,7 +1258,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
-       mcq->uar        = &mdev->mlx5e_res.cq_uar;
 
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1265,7 +1306,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
-       MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
+       MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
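
Both CQ-creation paths stop caching a per-CQ struct mlx5_uar pointer: the CQ context's uar_page now comes straight from the core-owned mdev->priv.uar, so the mcq->uar assignment is dead and is dropped here and again in the drop-CQ path further down.
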
@@ -1472,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
        return err;
 }
 
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+       return is_kdump_kernel() ?
+               MLX5E_MIN_NUM_CHANNELS :
+               min_t(int, mdev->priv.eq_table.num_comp_vectors,
+                     MLX5E_MAX_NUM_CHANNELS);
+}
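
mlx5e_get_max_num_channels() makes the channel budget explicit: one channel per completion EQ vector, capped at MLX5E_MAX_NUM_CHANNELS, but forced to MLX5E_MIN_NUM_CHANNELS in a kdump kernel to keep the crash kernel small. For example, a host exposing 32 completion vectors would get 32 channels (assuming the cap is higher), while the same NIC under kdump gets the minimum.
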
+
 static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
@@ -1677,7 +1726,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
 {
        void *cqc = param->cqc;
 
-       MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
+       MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
 }
 
 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -2022,8 +2071,23 @@ static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
        MLX5_SET(tirc, tirc, lro_timeout_period_usecs, priv->params.lro_timeout);
 }
 
-void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
+void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_priv *priv, void *tirc,
+                                   enum mlx5e_traffic_types tt)
 {
+       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
+                                MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
+                                MLX5_HASH_FIELD_SEL_DST_IP   |\
+                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
+
        MLX5_SET(tirc, tirc, rx_hash_fn,
                 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
        if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
@@ -2035,6 +2099,88 @@ void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
                MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
                memcpy(rss_key, priv->params.toeplitz_hash_key, len);
        }
+
+       switch (tt) {
+       case MLX5E_TT_IPV4_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_TCP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_TCP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV6_UDP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+                        MLX5_L4_PROT_TYPE_UDP);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_L4PORTS);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_AH:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV6_IPSEC_ESP:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP_IPSEC_SPI);
+               break;
+
+       case MLX5E_TT_IPV4:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV4);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+
+       case MLX5E_TT_IPV6:
+               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+                        MLX5_L3_PROT_TYPE_IPV6);
+               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+                        MLX5_HASH_IP);
+               break;
+       default:
+               WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
+       }
 }
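
The switch above is the field-selection table the old indirect-TIR builder carried (moved here so the hash context can be rebuilt outside initial TIR creation); in summary:

        /* selected_fields per traffic type, as programmed above:
         *   TCP/UDP over IPv4/IPv6 -> MLX5_HASH_IP_L4PORTS   (4-tuple)
         *   AH/ESP  over IPv4/IPv6 -> MLX5_HASH_IP_IPSEC_SPI (addresses + SPI)
         *   plain      IPv4/IPv6   -> MLX5_HASH_IP           (addresses only)
         */
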
 
 static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
@@ -2296,7 +2442,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
-       mcq->uar        = &mdev->mlx5e_res.cq_uar;
 
        cq->priv = priv;
 
@@ -2404,110 +2549,13 @@ void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
 static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
                                      enum mlx5e_traffic_types tt)
 {
-       void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
-
        MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
 
-#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP)
-
-#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_L4_SPORT |\
-                                MLX5_HASH_FIELD_SEL_L4_DPORT)
-
-#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
-                                MLX5_HASH_FIELD_SEL_DST_IP   |\
-                                MLX5_HASH_FIELD_SEL_IPSEC_SPI)
-
        mlx5e_build_tir_ctx_lro(tirc, priv);
 
        MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
        MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-       mlx5e_build_tir_ctx_hash(tirc, priv);
-
-       switch (tt) {
-       case MLX5E_TT_IPV4_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_TCP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_TCP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV6_UDP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
-                        MLX5_L4_PROT_TYPE_UDP);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_L4PORTS);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_AH:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV6_IPSEC_ESP:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP_IPSEC_SPI);
-               break;
-
-       case MLX5E_TT_IPV4:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV4);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-
-       case MLX5E_TT_IPV6:
-               MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
-                        MLX5_L3_PROT_TYPE_IPV6);
-               MLX5_SET(rx_hash_field_select, hfso, selected_fields,
-                        MLX5_HASH_IP);
-               break;
-       default:
-               WARN_ONCE(true,
-                         "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
-       }
+       mlx5e_build_indir_tir_ctx_hash(priv, tirc, tt);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
@@ -2686,7 +2734,7 @@ mqprio:
        return mlx5e_setup_tc(dev, tc->tc);
 }
 
-static struct rtnl_link_stats64 *
+static void
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2729,7 +2777,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->multicast =
                VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 
-       return stats;
 }
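
This follows the tree-wide conversion of ndo_get_stats64 to a void function: dev_get_stats() in the core no longer consumes a return value and simply expects the driver to fill *stats, so the trailing return disappears.
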
 
 static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -2987,11 +3034,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
 
-       if (min_tx_rate)
-               return -EOPNOTSUPP;
-
        return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
-                                          max_tx_rate);
+                                          max_tx_rate, min_tx_rate);
 }
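
With the min_tx_rate early-out gone, a minimum (guaranteed) rate is no longer rejected; both limits are handed to the eswitch, whose mlx5_eswitch_set_vport_rate() gained a min_tx_rate parameter to match.
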
 
 static int mlx5_vport_link2ifla(u8 esw_link)
@@ -3159,11 +3203,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
        bool reset, was_opened;
        int i;
 
-       if (prog && prog->xdp_adjust_head) {
-               netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
-               return -EOPNOTSUPP;
-       }
-
        mutex_lock(&priv->state_lock);
 
        if ((netdev->features & NETIF_F_LRO) && prog) {
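
Deleting this rejection enables bpf_xdp_adjust_head() on mlx5, which the rx_headroom change earlier in this patch makes safe: XDP_PACKET_HEADROOM bytes now sit in front of every frame for the program to grow into. A hedged sketch of an XDP program exercising the helper (illustrative names; built with clang -target bpf, header paths per current libbpf):

        #include <linux/bpf.h>
        #include <bpf/bpf_helpers.h>

        SEC("xdp")
        int xdp_push_hdr(struct xdp_md *ctx)
        {
                /* A negative delta moves the data pointer into the reserved
                 * headroom, making room for an 8-byte encap header. */
                if (bpf_xdp_adjust_head(ctx, -8))
                        return XDP_DROP;
                return XDP_PASS;
        }

        char _license[] SEC("license") = "GPL";
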
@@ -3331,7 +3370,7 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
 {
        if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
            !MLX5_CAP_GEN(mdev, nic_flow_table) ||
            !MLX5_CAP_ETH(mdev, csum_cap) ||
@@ -3343,7 +3382,7 @@ static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
                               < 3) {
                mlx5_core_warn(mdev,
                               "Not creating net device, some required device capabilities are missing\n");
-               return -ENOTSUPP;
+               return -EOPNOTSUPP;
        }
        if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
                mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
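
The errno swap is user-visible: ENOTSUPP is a kernel-internal value (524) with no libc string, so it surfaces as "Unknown error 524", whereas EOPNOTSUPP (95) renders as "Operation not supported".
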
@@ -3432,22 +3471,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
                        MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
 }
 
-static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
-                                  u8 *min_inline_mode)
-{
-       switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
-       case MLX5_CAP_INLINE_MODE_L2:
-               *min_inline_mode = MLX5_INLINE_MODE_L2;
-               break;
-       case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
-               mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
-               break;
-       case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
-               *min_inline_mode = MLX5_INLINE_MODE_NONE;
-               break;
-       }
-}
-
 u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
 {
        int i;
@@ -3481,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
        priv->params.lro_timeout =
                mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
 
-       priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+       priv->params.log_sq_size = is_kdump_kernel() ?
+               MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
+               MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
 
        /* set CQE compression */
        priv->params.rx_cqe_compress_def = false;
@@ -3507,7 +3532,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
        priv->params.tx_cq_moderation.pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline         = mlx5e_get_max_inline_cap(mdev);
-       mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+       mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
        priv->params.num_tc                = 1;
        priv->params.rss_hfunc             = ETH_RSS_HASH_XOR;
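
mlx5e_query_min_inline() leaves this file because the capability-to-inline-mode lookup was evidently promoted to a shared core helper, mlx5_query_min_inline(), which the netdev-priv builder now calls instead at the end of this hunk; the L2 / vport-context / not-required logic itself appears unchanged, only relocated.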