net/mlx5e: Present SW stats when state is not opened
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 417bf2e8ab85687127192e7e2fc479150a37b787..adc55de6d4f445da9ca67e93e446fd7d76bc94c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -46,6 +46,7 @@
 #include "accel/ipsec.h"
 #include "accel/tls.h"
 #include "vxlan.h"
+#include "en/port.h"
 
 struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
@@ -422,6 +423,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
        rq->ix      = c->ix;
        rq->mdev    = mdev;
        rq->hw_mtu  = MLX5E_SW2HW_MTU(params, params->sw_mtu);
+       rq->stats   = &c->priv->channel_stats[c->ix].rq;
 
        rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
        if (IS_ERR(rq->xdp_prog)) {
@@ -645,8 +647,8 @@ static int mlx5e_create_rq(struct mlx5e_rq *rq,
                                                MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq,  dbr_addr,           rq->wq_ctrl.db.dma);
 
-       mlx5_fill_page_array(&rq->wq_ctrl.buf,
-                            (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+       mlx5_fill_page_frag_array(&rq->wq_ctrl.buf,
+                                 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
 
@@ -835,13 +837,15 @@ err_free_rq:
 static void mlx5e_activate_rq(struct mlx5e_rq *rq)
 {
        struct mlx5e_icosq *sq = &rq->channel->icosq;
-       u16 pi = sq->pc & sq->wq.sz_m1;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        struct mlx5e_tx_wqe *nopwqe;
 
+       u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
+
        set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
        sq->db.ico_wqe[pi].opcode     = MLX5_OPCODE_NOP;
-       nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
-       mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
+       nopwqe = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+       mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
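
The open-coded producer-counter masking (sq->pc & sq->wq.sz_m1) gives way to the mlx5_wq_cyc_ctr2ix() helper here and in the other SQ paths further down. A minimal sketch of the idea behind such a helper, using an illustrative field layout rather than the driver's actual one:

/* Illustrative only: map a free-running 16-bit counter onto a
 * power-of-two cyclic work queue. The real helper and its size-mask
 * field live in the mlx5 WQ header, not in this diff.
 */
static inline u16 wq_cyc_ctr2ix_sketch(u16 ctr, u16 sz_m1 /* ring size - 1 */)
{
	return ctr & sz_m1;
}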
 
 static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
@@ -884,6 +888,7 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        int err;
 
        sq->pdev      = c->pdev;
@@ -893,10 +898,10 @@ static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
        sq->min_inline_mode = params->tx_min_inline_mode;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
-       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
        if (err)
                return err;
-       sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+       wq->db = &wq->db[MLX5_SND_DBR];
 
        err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
        if (err)
@@ -939,23 +944,22 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        int err;
 
        sq->channel   = c;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
-       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
        if (err)
                return err;
-       sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+       wq->db = &wq->db[MLX5_SND_DBR];
 
        err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;
 
-       sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;
-
        return 0;
 
 err_sq_wq_destroy:
@@ -1000,10 +1004,12 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                             int txq_ix,
                             struct mlx5e_params *params,
                             struct mlx5e_sq_param *param,
-                            struct mlx5e_txqsq *sq)
+                            struct mlx5e_txqsq *sq,
+                            int tc)
 {
        void *sqc_wq               = MLX5_ADDR_OF(sqc, param->sqc, wq);
        struct mlx5_core_dev *mdev = c->mdev;
+       struct mlx5_wq_cyc *wq = &sq->wq;
        int err;
 
        sq->pdev      = c->pdev;
@@ -1014,6 +1020,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        sq->txq_ix    = txq_ix;
        sq->uar_map   = mdev->mlx5e_res.bfreg.map;
        sq->min_inline_mode = params->tx_min_inline_mode;
+       sq->stats     = &c->priv->channel_stats[c->ix].sq[tc];
        INIT_WORK(&sq->recover.recover_work, mlx5e_sq_recover);
        if (MLX5_IPSEC_DEV(c->priv->mdev))
                set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);
@@ -1021,10 +1028,10 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
                set_bit(MLX5E_SQ_STATE_TLS, &sq->state);
 
        param->wq.db_numa_node = cpu_to_node(c->cpu);
-       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
+       err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, wq, &sq->wq_ctrl);
        if (err)
                return err;
-       sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];
+       wq->db    = &wq->db[MLX5_SND_DBR];
 
        err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
        if (err)
@@ -1033,8 +1040,6 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
        INIT_WORK(&sq->dim.work, mlx5e_tx_dim_work);
        sq->dim.mode = params->tx_cq_moderation.cq_period_mode;
 
-       sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
-
        return 0;
 
 err_sq_wq_destroy:
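
Both the ICOSQ and TXQSQ allocators above stop pre-computing sq->edge. The removed value appears to mark the last ring slot from which a maximum-size WQE still fits without wrapping; roughly, as an illustrative helper rather than driver code:

/* Illustrative: with a ring of `size` WQEBBs, a WQE of up to `max_wqebbs`
 * basic blocks fits contiguously only while the producer index is at or
 * below size - max_wqebbs; past that edge the tail needs padding.
 */
static inline u16 sq_edge_sketch(u16 size, u16 max_wqebbs)
{
	return size - max_wqebbs;
}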
@@ -1094,7 +1099,8 @@ static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);
 
-       mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+       mlx5_fill_page_frag_array(&csp->wq_ctrl->buf,
+                                 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
 
        err = mlx5_core_create_sq(mdev, in, inlen, sqn);
 
@@ -1173,13 +1179,14 @@ static int mlx5e_open_txqsq(struct mlx5e_channel *c,
                            int txq_ix,
                            struct mlx5e_params *params,
                            struct mlx5e_sq_param *param,
-                           struct mlx5e_txqsq *sq)
+                           struct mlx5e_txqsq *sq,
+                           int tc)
 {
        struct mlx5e_create_sq_param csp = {};
        u32 tx_rate;
        int err;
 
-       err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
+       err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq, tc);
        if (err)
                return err;
 
@@ -1237,6 +1244,7 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
 static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
 {
        struct mlx5e_channel *c = sq->channel;
+       struct mlx5_wq_cyc *wq = &sq->wq;
 
        clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
        /* prevent netif_tx_wake_queue */
@@ -1245,12 +1253,13 @@ static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
        netif_tx_disable_queue(sq->txq);
 
        /* last doorbell out, godspeed .. */
-       if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
+       if (mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1)) {
+               u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc);
                struct mlx5e_tx_wqe *nop;
 
-               sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
-               nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
-               mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
+               sq->db.wqe_info[pi].skb = NULL;
+               nop = mlx5e_post_nop(wq, sq->sqn, &sq->pc);
+               mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &nop->ctrl);
        }
 }
 
@@ -1365,7 +1374,7 @@ static void mlx5e_sq_recover(struct work_struct *work)
                return;
 
        mlx5e_reset_txqsq_cc_pc(sq);
-       sq->stats.recover++;
+       sq->stats->recover++;
        recover->last_recover = jiffies;
        mlx5e_activate_txqsq(sq);
 }
@@ -1534,7 +1543,7 @@ static int mlx5e_alloc_cq(struct mlx5e_channel *c,
 
 static void mlx5e_free_cq(struct mlx5e_cq *cq)
 {
-       mlx5_cqwq_destroy(&cq->wq_ctrl);
+       mlx5_wq_destroy(&cq->wq_ctrl);
 }
 
 static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
@@ -1550,7 +1559,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        int err;
 
        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
-               sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
+               sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = kvzalloc(inlen, GFP_KERNEL);
        if (!in)
                return -ENOMEM;
@@ -1559,7 +1568,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
 
        memcpy(cqc, param->cqc, sizeof(param->cqc));
 
-       mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
+       mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
@@ -1567,7 +1576,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
        MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc,   cqc, c_eqn,         eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
-       MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
+       MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                            MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);
 
@@ -1660,14 +1669,14 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_params *params,
                          struct mlx5e_channel_param *cparam)
 {
-       int err;
-       int tc;
+       struct mlx5e_priv *priv = c->priv;
+       int err, tc, max_nch = priv->profile->max_nch(priv->mdev);
 
        for (tc = 0; tc < params->num_tc; tc++) {
-               int txq_ix = c->ix + tc * params->num_channels;
+               int txq_ix = c->ix + tc * max_nch;
 
                err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
-                                      params, &cparam->sq, &c->sq[tc]);
+                                      params, &cparam->sq, &c->sq[tc], tc);
                if (err)
                        goto err_close_sqs;
        }
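
The TXQ index is now strided by the profile's maximum channel count rather than the currently configured number of channels, presumably so a given (channel, tc) pair keeps the same txq index across channel-count changes. A standalone sketch of the mapping (the numbers are only examples):

/* Sketch of the (channel, tc) -> txq index layout used above.
 * With max_nch = 64: channel 0/tc 0 -> 0, channel 0/tc 1 -> 64,
 * channel 5/tc 2 -> 133, regardless of how many channels are open.
 */
static inline int txq_ix_sketch(int ch_ix, int tc, int max_nch)
{
	return ch_ix + tc * max_nch;
}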
@@ -1797,6 +1806,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
        c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc   = params->num_tc;
        c->xdp      = !!params->xdp_prog;
+       c->stats    = &priv->channel_stats[ix].ch;
 
        mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
        c->irq_desc = irq_to_desc(irq);
@@ -2623,15 +2633,21 @@ static void mlx5e_netdev_set_tcs(struct net_device *netdev)
                netdev_set_tc_queue(netdev, tc, nch, 0);
 }
 
-static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
+static void mlx5e_build_tc2txq_maps(struct mlx5e_priv *priv)
 {
-       struct mlx5e_channel *c;
-       struct mlx5e_txqsq *sq;
+       int max_nch = priv->profile->max_nch(priv->mdev);
        int i, tc;
 
-       for (i = 0; i < priv->channels.num; i++)
+       for (i = 0; i < max_nch; i++)
                for (tc = 0; tc < priv->profile->max_tc; tc++)
-                       priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;
+                       priv->channel_tc2txq[i][tc] = i + tc * max_nch;
+}
+
+static void mlx5e_build_tx2sq_maps(struct mlx5e_priv *priv)
+{
+       struct mlx5e_channel *c;
+       struct mlx5e_txqsq *sq;
+       int i, tc;
 
        for (i = 0; i < priv->channels.num; i++) {
                c = priv->channels.c[i];
@@ -2651,8 +2667,11 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
        netif_set_real_num_tx_queues(netdev, num_txqs);
        netif_set_real_num_rx_queues(netdev, priv->channels.num);
 
-       mlx5e_build_channels_tx_maps(priv);
+       mlx5e_build_tx2sq_maps(priv);
        mlx5e_activate_channels(&priv->channels);
+       write_lock(&priv->stats_lock);
+       priv->channels_active = true;
+       write_unlock(&priv->stats_lock);
        netif_tx_start_all_queues(priv->netdev);
 
        if (MLX5_VPORT_MANAGER(priv->mdev))
@@ -2674,6 +2693,9 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
         */
        netif_tx_stop_all_queues(priv->netdev);
        netif_tx_disable(priv->netdev);
+       write_lock(&priv->stats_lock);
+       priv->channels_active = false;
+       write_unlock(&priv->stats_lock);
        mlx5e_deactivate_channels(&priv->channels);
 }
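
The activate/deactivate paths now publish a channels_active flag under the new stats_lock rwlock. One plausible read-side counterpart (hypothetical, not part of this diff) would fold SW counters only while channels are marked active:

/* Hypothetical reader pairing with the write_lock sections above; the
 * function name and the placement of the fold are assumptions.
 */
static void sw_stats_fold_sketch(struct mlx5e_priv *priv)
{
	read_lock(&priv->stats_lock);
	if (priv->channels_active)
		mlx5e_grp_sw_update_stats(priv);
	read_unlock(&priv->stats_lock);
}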
 
@@ -3128,6 +3150,8 @@ static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
        if (err)
                goto out;
 
+       priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+                                   new_channels.params.num_tc);
        mlx5e_switch_priv_channels(priv, &new_channels, NULL);
 out:
        mutex_unlock(&priv->state_lock);
@@ -3136,22 +3160,23 @@ out:
 
 #ifdef CONFIG_MLX5_ESWITCH
 static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
-                                    struct tc_cls_flower_offload *cls_flower)
+                                    struct tc_cls_flower_offload *cls_flower,
+                                    int flags)
 {
        switch (cls_flower->command) {
        case TC_CLSFLOWER_REPLACE:
-               return mlx5e_configure_flower(priv, cls_flower);
+               return mlx5e_configure_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_DESTROY:
-               return mlx5e_delete_flower(priv, cls_flower);
+               return mlx5e_delete_flower(priv, cls_flower, flags);
        case TC_CLSFLOWER_STATS:
-               return mlx5e_stats_flower(priv, cls_flower);
+               return mlx5e_stats_flower(priv, cls_flower, flags);
        default:
                return -EOPNOTSUPP;
        }
 }
 
-int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
-                           void *cb_priv)
+static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+                                  void *cb_priv)
 {
        struct mlx5e_priv *priv = cb_priv;
 
@@ -3160,7 +3185,7 @@ int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
 
        switch (type) {
        case TC_SETUP_CLSFLOWER:
-               return mlx5e_setup_tc_cls_flower(priv, type_data);
+               return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS);
        default:
                return -EOPNOTSUPP;
        }
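
The flower callbacks now take a direction-flags argument, and the NIC block callback passes MLX5E_TC_INGRESS. The flags presumably form a small bitmask along these lines (illustrative names and values; the real definitions live in the driver's TC offload header, not in this diff):

/* Illustrative direction bits for offloaded TC flows. */
enum {
	SKETCH_TC_INGRESS = 1 << 0,
	SKETCH_TC_EGRESS  = 1 << 1,
};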
@@ -3217,6 +3242,7 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
                stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
                stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
        } else {
+               mlx5e_grp_sw_update_stats(priv);
                stats->rx_packets = sstats->rx_packets;
                stats->rx_bytes   = sstats->rx_bytes;
                stats->tx_packets = sstats->tx_packets;
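
With per-channel counters kept in priv->channel_stats[], the netdev stats path can refresh the SW group on demand even when no channels are open. A rough sketch of what such a fold could look like; any field beyond those visible in this diff (channel_stats[] and its rq/sq sub-structs) is an assumption:

/* Hypothetical on-demand fold over the persistent per-channel counters. */
static void grp_sw_update_stats_sketch(struct mlx5e_priv *priv, int max_nch)
{
	struct mlx5e_sw_stats s = {};
	int i;

	for (i = 0; i < max_nch; i++) {
		struct mlx5e_rq_stats *rq_stats = &priv->channel_stats[i].rq;

		s.rx_packets += rq_stats->packets;
		s.rx_bytes   += rq_stats->bytes;
	}
	priv->stats.sw = s;
}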
@@ -3813,7 +3839,7 @@ static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev,
                return false;
 
        netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn);
-       sq->channel->stats.eq_rearm++;
+       sq->channel->stats->eq_rearm++;
        return true;
 }
 
@@ -4081,7 +4107,7 @@ static bool slow_pci_heuristic(struct mlx5_core_dev *mdev)
        u32 link_speed = 0;
        u32 pci_bw = 0;
 
-       mlx5e_get_max_linkspeed(mdev, &link_speed);
+       mlx5e_port_max_linkspeed(mdev, &link_speed);
        pci_bw = pcie_bandwidth_available(mdev->pdev, NULL, NULL, NULL);
        mlx5_core_dbg_once(mdev, "Max link speed = %d, PCI BW = %d\n",
                           link_speed, pci_bw);
@@ -4237,11 +4263,13 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
        priv->profile     = profile;
        priv->ppriv       = ppriv;
        priv->msglevel    = MLX5E_MSG_LEVEL;
+       priv->max_opened_tc = 1;
 
        mlx5e_build_nic_params(mdev, &priv->channels.params,
                               profile->max_nch(mdev), netdev->mtu);
 
        mutex_init(&priv->state_lock);
+       rwlock_init(&priv->stats_lock);
 
        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
@@ -4424,6 +4452,7 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
        if (err)
                mlx5_core_err(mdev, "TLS initialization failed, %d\n", err);
        mlx5e_build_nic_netdev(netdev);
+       mlx5e_build_tc2txq_maps(priv);
        mlx5e_vxlan_init(priv);
 }
 
@@ -4461,7 +4490,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
                goto err_destroy_direct_tirs;
        }
 
-       err = mlx5e_tc_init(priv);
+       err = mlx5e_tc_nic_init(priv);
        if (err)
                goto err_destroy_flow_steering;
 
@@ -4482,7 +4511,7 @@ err_destroy_indirect_rqts:
 
 static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
 {
-       mlx5e_tc_cleanup(priv);
+       mlx5e_tc_nic_cleanup(priv);
        mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv);