net/mlx5: Add support for setting VF min rate
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2b7dd315020cd9e1a21b28643621122695cd06dd..c819d07fbdb381c669ca39f0729b2589a404c24a 100644
@@ -268,6 +268,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+               out = pstats->phy_statistical_counters;
+               MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+               mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+       }
+
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
@@ -291,11 +297,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
                                      &qcnt->rx_out_of_buffer);
 }
 
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+       struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+       void *out;
+       u32 *in;
+
+       if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+               return;
+
+       in = mlx5_vzalloc(sz);
+       if (!in)
+               return;
+
+       out = pcie_stats->pcie_perf_counters;
+       MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+       kvfree(in);
+}
+
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
-       mlx5e_update_q_counter(priv);
-       mlx5e_update_vport_counters(priv);
+       mlx5e_update_pcie_counters(priv);
        mlx5e_update_pport_counters(priv);
+       mlx5e_update_vport_counters(priv);
+       mlx5e_update_q_counter(priv);
        mlx5e_update_sw_counters(priv);
 }
 
@@ -317,6 +346,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
 {
        struct mlx5e_priv *priv = vpriv;
+       struct ptp_clock_event ptp_event;
+       struct mlx5_eqe *eqe = NULL;
 
        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;
@@ -326,7 +357,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
-
+       case MLX5_DEV_EVENT_PPS:
+               eqe = (struct mlx5_eqe *)param;
+               ptp_event.type = PTP_CLOCK_EXTTS;
+               ptp_event.index = eqe->data.pps.pin;
+               ptp_event.timestamp =
+                       timecounter_cyc2time(&priv->tstamp.clock,
+                                            be64_to_cpu(eqe->data.pps.time_stamp));
+               mlx5e_pps_event_handler(vpriv, &ptp_event);
+               break;
        default:
                break;
        }
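
The new MLX5_DEV_EVENT_PPS arm forwards hardware pulse-in events to the kernel PTP core as PTP_CLOCK_EXTTS events, stamped through the driver's timecounter. For context, a minimal userspace sketch (illustration only, not part of this patch) that consumes such events through the standard PTP character-device UAPI; the device path is an assumption:

	/* Hypothetical consumer of the EXTTS events queued above; uses only
	 * the standard chardev UAPI from <linux/ptp_clock.h>. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/ptp_clock.h>

	int main(void)
	{
		struct ptp_extts_request req = {
			.index = 0,		/* pin, as reported in eqe->data.pps.pin */
			.flags = PTP_ENABLE_FEATURE,
		};
		struct ptp_extts_event ev;
		int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

		if (fd < 0 || ioctl(fd, PTP_EXTTS_REQUEST, &req))
			return 1;
		while (read(fd, &ev, sizeof(ev)) == sizeof(ev))	/* blocks per event */
			printf("pin %u: %lld.%09u\n", ev.index,
			       (long long)ev.t.sec, ev.t.nsec);
		return 0;
	}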
@@ -343,9 +382,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
 static inline int mlx5e_get_wqe_mtt_sz(void)
 {
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -372,7 +408,7 @@ static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, struct mlx5e_sq *sq,
        cseg->imm       = rq->mkey_be;
 
        ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
-       ucseg->klm_octowords =
+       ucseg->xlt_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
        ucseg->bsf_octowords =
                cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
@@ -534,9 +570,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                goto err_rq_wq_destroy;
        }
 
-       rq->buff.map_dir = DMA_FROM_DEVICE;
-       if (rq->xdp_prog)
+       if (rq->xdp_prog) {
                rq->buff.map_dir = DMA_BIDIRECTIONAL;
+               rq->rx_headroom = XDP_PACKET_HEADROOM;
+       } else {
+               rq->buff.map_dir = DMA_FROM_DEVICE;
+               rq->rx_headroom = MLX5_RX_HEADROOM;
+       }
 
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -586,7 +626,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                byte_count = rq->buff.wqe_sz;
 
                /* calc the required page order */
-               frag_sz = MLX5_RX_HEADROOM +
+               frag_sz = rq->rx_headroom +
                          byte_count /* packet data */ +
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                frag_sz = SKB_DATA_ALIGN(frag_sz);
@@ -967,7 +1007,7 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
        sq->channel   = c;
        sq->tc        = tc;
 
-       err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
+       err = mlx5_alloc_bfreg(mdev, &sq->bfreg, MLX5_CAP_GEN(mdev, bf), false);
        if (err)
                return err;
 
@@ -979,12 +1019,9 @@ static int mlx5e_create_sq(struct mlx5e_channel *c,
                goto err_unmap_free_uar;
 
        sq->wq.db       = &sq->wq.db[MLX5_SND_DBR];
-       if (sq->uar.bf_map) {
+       if (sq->bfreg.wc)
                set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
-               sq->uar_map = sq->uar.bf_map;
-       } else {
-               sq->uar_map = sq->uar.map;
-       }
+
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline  = param->max_inline;
        sq->min_inline_mode =
@@ -1012,7 +1049,7 @@ err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);
 
 err_unmap_free_uar:
-       mlx5_unmap_free_uar(mdev, &sq->uar);
+       mlx5_free_bfreg(mdev, &sq->bfreg);
 
        return err;
 }
@@ -1024,7 +1061,7 @@ static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
 
        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
-       mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+       mlx5_free_bfreg(priv->mdev, &sq->bfreg);
 }
 
 static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
@@ -1058,7 +1095,7 @@ static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
        MLX5_SET(sqc,  sqc, tis_lst_sz, param->type == MLX5E_SQ_ICO ? 0 : 1);
 
        MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
-       MLX5_SET(wq,   wq, uar_page,      sq->uar.index);
+       MLX5_SET(wq,   wq, uar_page,      sq->bfreg.index);
        MLX5_SET(wq,   wq, log_wq_pg_sz,  sq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      sq->wq_ctrl.db.dma);
@@ -1216,7 +1253,6 @@ static int mlx5e_create_cq(struct mlx5e_channel *c,
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
-       mcq->uar        = &mdev->mlx5e_res.cq_uar;
 
        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
@@ -1265,7 +1301,7 @@ static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
-       MLX5_SET(cqc,   cqc, uar_page,      mcq->uar->index);
+       MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
@@ -1677,7 +1713,7 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
 {
        void *cqc = param->cqc;
 
-       MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
+       MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
 }
 
 static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
@@ -2296,7 +2332,6 @@ static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
        mcq->comp       = mlx5e_completion_event;
        mcq->event      = mlx5e_cq_error_event;
        mcq->irqn       = irqn;
-       mcq->uar        = &mdev->mlx5e_res.cq_uar;
 
        cq->priv = priv;
 
@@ -2686,7 +2721,7 @@ mqprio:
        return mlx5e_setup_tc(dev, tc->tc);
 }
 
-static struct rtnl_link_stats64 *
+static void
 mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
        struct mlx5e_priv *priv = netdev_priv(dev);
@@ -2729,7 +2764,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
        stats->multicast =
                VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
 
-       return stats;
 }
 
 static void mlx5e_set_rx_mode(struct net_device *dev)
@@ -2987,11 +3021,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
 
-       if (min_tx_rate)
-               return -EOPNOTSUPP;
-
        return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
-                                          max_tx_rate);
+                                          max_tx_rate, min_tx_rate);
 }
 
 static int mlx5_vport_link2ifla(u8 esw_link)
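
With the -EOPNOTSUPP guard removed, min_tx_rate now flows through to mlx5_eswitch_set_vport_rate() alongside max_tx_rate (the vport is vf + 1 because eswitch vport 0 is the PF). Both values reach this ndo in Mb/s via the IFLA_VF_RATE netlink attribute, so a typical invocation from iproute2 looks like:

	ip link set dev <pf_netdev> vf 0 min_tx_rate 100 max_tx_rate 1000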
@@ -3159,11 +3190,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
        bool reset, was_opened;
        int i;
 
-       if (prog && prog->xdp_adjust_head) {
-               netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
-               return -EOPNOTSUPP;
-       }
-
        mutex_lock(&priv->state_lock);
 
        if ((netdev->features & NETIF_F_LRO) && prog) {
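
Dropping this check is paired with the XDP_PACKET_HEADROOM reservation added in mlx5e_create_rq() above: attached programs may now move the packet start into the reserved 256-byte headroom. A minimal sketch of a program exercising this (hypothetical, for illustration; assumes libbpf-style SEC() annotations):

	/* Relies on the driver leaving XDP_PACKET_HEADROOM in front of the
	 * frame; bpf_xdp_adjust_head() fails if the headroom is absent. */
	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>

	SEC("xdp")
	int xdp_grow_head(struct xdp_md *ctx)
	{
		/* Claim 8 bytes of headroom, e.g. to prepend a custom header. */
		if (bpf_xdp_adjust_head(ctx, -8))
			return XDP_ABORTED;
		return XDP_PASS;
	}

	char _license[] SEC("license") = "GPL";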