net/mlx5: Add support for setting VF min rate
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_main.c
index f74ba73c55c750be02de2c5ff8065cde118e60fa..c819d07fbdb381c669ca39f0729b2589a404c24a 100644 (file)
@@ -268,6 +268,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 
+       if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+               out = pstats->phy_statistical_counters;
+               MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+               mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+       }
+
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
@@ -291,11 +297,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
                                      &qcnt->rx_out_of_buffer);
 }
 
+/* Query the PCIe performance counters group of the MPCNT register
+ * into priv->stats.pcie.pcie_perf_counters.
+ *
+ * Best-effort: returns silently (leaving the cached counters stale)
+ * when the device does not advertise pcie_performance_group in MCAM,
+ * or when the scratch input buffer cannot be allocated.
+ */
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+       struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+       struct mlx5_core_dev *mdev = priv->mdev;
+       int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+       void *out;
+       u32 *in;
+
+       /* Firmware rejects MPCNT access on devices without the cap. */
+       if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+               return;
+
+       in = mlx5_vzalloc(sz);
+       if (!in)
+               return;
+
+       /* Write the query result directly into the stats struct. */
+       out = pcie_stats->pcie_perf_counters;
+       MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+       mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+       kvfree(in);
+}
+
 void mlx5e_update_stats(struct mlx5e_priv *priv)
 {
-       mlx5e_update_q_counter(priv);
-       mlx5e_update_vport_counters(priv);
+       /* NOTE(review): ordering changed so the device-register reads
+        * (pcie, pport, vport) happen first and SW counters last —
+        * presumably to keep HW and SW snapshots closer in time;
+        * confirm against the commit rationale.
+        */
+       mlx5e_update_pcie_counters(priv);
        mlx5e_update_pport_counters(priv);
+       mlx5e_update_vport_counters(priv);
+       mlx5e_update_q_counter(priv);
        mlx5e_update_sw_counters(priv);
 }
 
@@ -317,6 +346,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
 {
        struct mlx5e_priv *priv = vpriv;
+       struct ptp_clock_event ptp_event;
+       struct mlx5_eqe *eqe = NULL;
 
        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;
@@ -326,7 +357,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;
-
+       case MLX5_DEV_EVENT_PPS:
+               eqe = (struct mlx5_eqe *)param;
+               ptp_event.type = PTP_CLOCK_EXTTS;
+               ptp_event.index = eqe->data.pps.pin;
+               ptp_event.timestamp =
+                       timecounter_cyc2time(&priv->tstamp.clock,
+                                            be64_to_cpu(eqe->data.pps.time_stamp));
+               mlx5e_pps_event_handler(vpriv, &ptp_event);
+               break;
        default:
                break;
        }
@@ -343,9 +382,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
 }
 
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
 static inline int mlx5e_get_wqe_mtt_sz(void)
 {
        /* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -534,9 +570,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                goto err_rq_wq_destroy;
        }
 
-       rq->buff.map_dir = DMA_FROM_DEVICE;
-       if (rq->xdp_prog)
+       if (rq->xdp_prog) {
                rq->buff.map_dir = DMA_BIDIRECTIONAL;
+               rq->rx_headroom = XDP_PACKET_HEADROOM;
+       } else {
+               rq->buff.map_dir = DMA_FROM_DEVICE;
+               rq->rx_headroom = MLX5_RX_HEADROOM;
+       }
 
        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -586,7 +626,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
                byte_count = rq->buff.wqe_sz;
 
                /* calc the required page order */
-               frag_sz = MLX5_RX_HEADROOM +
+               frag_sz = rq->rx_headroom +
                          byte_count /* packet data */ +
                          SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
                frag_sz = SKB_DATA_ALIGN(frag_sz);
@@ -2981,11 +3021,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
 
-       if (min_tx_rate)
-               return -EOPNOTSUPP;
-
        return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
-                                          max_tx_rate);
+                                          max_tx_rate, min_tx_rate);
 }
 
 static int mlx5_vport_link2ifla(u8 esw_link)
@@ -3153,11 +3190,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
        bool reset, was_opened;
        int i;
 
-       if (prog && prog->xdp_adjust_head) {
-               netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
-               return -EOPNOTSUPP;
-       }
-
        mutex_lock(&priv->state_lock);
 
        if ((netdev->features & NETIF_F_LRO) && prog) {