net/mlx5e: Introduce new statistics rwlock
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx5 / core / en_main.c
index 0f2b66b41899a21380f3ea478a9b53bedaea5f91..0e9c64580abb9bed2a0460458a66bccf4e12d995 100644 (file)
@@ -46,6 +46,7 @@
 #include "accel/ipsec.h"
 #include "accel/tls.h"
 #include "vxlan.h"
+#include "en/port.h"
 
 struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
@@ -645,8 +646,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
 
-       mlx5_fill_page_array(&rq->wq_ctrl.buf,
-                            (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+       mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
+                                 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
 
@@ -747,23 +748,24 @@ static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
        mlx5_core_destroy_rq(rq->mdev, rq->rqn);
 }
 
-static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq, int wait_time)
 {
-       unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
+       unsigned long exp_time = jiffies + msecs_to_jiffies(wait_time);
        struct mlx5e_channel *c = rq->channel;
 
        struct mlx5_wq_ll *wq = &rq->wq;
        u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));
 
-       while (time_before(jiffies, exp_time)) {
+       do {
                if (wq->cur_sz >= min_wqes)
                        return 0;
 
                msleep(20);
-       }
+       } while (time_before(jiffies, exp_time));
+
+       netdev_warn(c->netdev, "Failed to get min RX wqes on Channel[%d] RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
+                   c->ix, rq->rqn, wq->cur_sz, min_wqes);
 
-       netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
-                   rq->rqn, wq->cur_sz, min_wqes);
        return -ETIMEDOUT;
 }
 
@@ -819,7 +821,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
                goto err_destroy_rq;
 
        if (params->rx_dim_enabled)
-               c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
+               __set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
 
        return 0;
 
@@ -834,13 +836,15 @@ err_free_rq:
 static void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
        struct mlx5e_icosq *sq = &rq->channel->icosq;
-       u16 pi = sq->pc & sq->wq.sz_m1;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_tx_wqe *nopwqe;
 
+       u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
-       nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
-       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+       nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+       mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
 
 static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -883,6 +887,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        int err;
 
        sq->pdev      = c->pdev;
@@ -892,10 +897,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
        sq->min_inline_mode = params->tx_min_inline_mode;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
-       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
        if (err)
                return err;
-       sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+       wq->db = &wq->db[MLX5_SND_DBR];
 
        err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
@@ -938,23 +943,22 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        int err;
 
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
-       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
        if (err)
                return err;
-       sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+       wq->db = &wq->db[MLX5_SND_DBR];
 
        err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
-       sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
-
        return 0;
 
 err_sq_wq_destroy:
@@ -1003,6 +1007,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        int err;
 
        sq->pdev      = c->pdev;
@@ -1020,10 +1025,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
-       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
        if (err)
                return err;
-       sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
+       wq->db    = &wq->db[MLX5_SND_DBR];
 
        err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
@@ -1032,8 +1037,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
        sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
 
-       sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
-
        return 0;
 
 err_sq_wq_destroy:
@@ -1093,7 +1096,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);
 
-       mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+       mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
+                                 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
        err = mlx5_core_create_sq(mdev, in, inlen, sqn);
 
@@ -1236,6 +1240,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_channel *c = sq->channel;
+       struct mlx5_wq_cyc *wq = &sq->wq;
 
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        /* prevent netif_tx_wake_queue */
@@ -1244,12 +1249,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
        netif_tx_disable_queue(sq->txq);
 
        /* last doorbell out, godspeed .. */
-       if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
+       if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
+               u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
                struct mlx5e_tx_wqe *nop;
 
-               sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
-               nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
-               mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
+               sq->db.wqe_info[pi].skb = NULL;
+               nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+               mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
 }
 
@@ -1533,7 +1539,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
 
 static void mlx5e_free_cq(struct mlx5e_cq *cq)
 {
-       mlx5_cqwq_destroy(&cq->wq_ctrl);
+       mlx5_wq_destroy(&cq->wq_ctrl);
 }
 
 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1549,7 +1555,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        int err;
 
        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-               sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
+               sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;
@@ -1558,7 +1564,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
        memcpy(cqc, param->cqc, sizeof(param->cqc));
 
-       mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+       mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
@@ -1566,7 +1572,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
-       MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
+       MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
 
@@ -2128,13 +2134,11 @@ static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
        int err = 0;
        int i;
 
-       for (i = 0; i < chs->num; i++) {
-               err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
-               if (err)
-                       break;
-       }
+       for (i = 0; i < chs->num; i++)
+               err |= mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq,
+                                                 err ? 0 : 20000);
 
-       return err;
+       return err ? -ETIMEDOUT : 0;
 }
 
 static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
@@ -2654,6 +2658,9 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 
        mlx5e_build_channels_tx_maps(priv);
        mlx5e_activate_channels(&priv->channels);
+       write_lock(&priv->stats_lock);
+       priv->channels_active = true;
+       write_unlock(&priv->stats_lock);
        netif_tx_start_all_queues(priv->netdev);
 
        if (MLX5_VPORT_MANAGER(priv->mdev))
@@ -2675,6 +2682,9 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
         */
        netif_tx_stop_all_queues(priv->netdev);
        netif_tx_disable(priv->netdev);
+       write_lock(&priv->stats_lock);
+       priv->channels_active = false;
+       write_unlock(&priv->stats_lock);
        mlx5e_deactivate_channels(&priv->channels);
 }
 
@@ -3137,22 +3147,23 @@ out:
 
 #ifdef CONFIG_MLX5_ESWITCH
 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
-                                    struct tc_cls_flower_offload *cls_flower)
+                                    struct tc_cls_flower_offload *cls_flower,
+                                    int flags)
 {
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
-               return mlx5e_configure_flower(priv, cls_flower);
+               return mlx5e_configure_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_DESTROY:
-               return mlx5e_delete_flower(priv, cls_flower);
+               return mlx5e_delete_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_STATS:
-               return mlx5e_stats_flower(priv, cls_flower);
+               return mlx5e_stats_flower(priv, cls_flower, flags);
        default:
                return -EOPNOTSUPP;
        }
 }
 
-int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-                           void *cb_priv)
+static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+                                  void *cb_priv)
 {
        struct mlx5e_priv *priv = cb_priv;
 
@@ -3161,7 +3172,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 
        switch (type) {
        case TC_SETUP_CLSFLOWER:
-               return mlx5e_setup_tc_cls_flower(priv, type_data);
+               return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
        default:
                return -EOPNOTSUPP;
        }
@@ -3218,6 +3229,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
                stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
        } else {
+               mlx5e_grp_sw_update_stats(priv);
                stats->rx_packets = sstats->rx_packets;
                stats->rx_bytes   = sstats->rx_bytes;
                stats->tx_packets = sstats->tx_packets;
@@ -4082,7 +4094,7 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
        u32 link_speed = 0;
        u32 pci_bw = 0;
 
-       mlx5e_get_max_linkspeed(mdev, &link_speed);
+       mlx5e_port_max_linkspeed(mdev, &link_speed);
        pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
        mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
                           link_speed, pci_bw);
@@ -4243,6 +4255,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
                               profile->max_nch(mdev), netdev->mtu);
 
        mutex_init(&priv->state_lock);
+       rwlock_init(&priv->stats_lock);
 
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
@@ -4462,7 +4475,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
                goto err_destroy_direct_tirs;
        }
 
-       err = mlx5e_tc_init(priv);
+       err = mlx5e_tc_nic_init(priv);
        if (err)
                goto err_destroy_flow_steering;
 
@@ -4483,7 +4496,7 @@ err_destroy_indirect_rqts:
 
 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 {
-       mlx5e_tc_cleanup(priv);
+       mlx5e_tc_nic_cleanup(priv);
        mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv);