RDMA/mlx5: Allow providing extra scatter CQE QP flag
authorLeon Romanovsky <leonro@mellanox.com>
Tue, 28 Jul 2020 12:02:55 +0000 (15:02 +0300)
committerJason Gunthorpe <jgg@nvidia.com>
Wed, 29 Jul 2020 17:19:01 +0000 (14:19 -0300)
The scatter CQE feature relies on two flags, MLX5_QP_FLAG_SCATTER_CQE and
MLX5_QP_FLAG_ALLOW_SCATTER_CQE; either of them can be provided by userspace
regardless of the device capability.

Relax global validity check to allow MLX5_QP_FLAG_ALLOW_SCATTER_CQE QP
flag.

Existing user applications are failing on this new validity check.

Fixes: 90ecb37a751b ("RDMA/mlx5: Change scatter CQE flag to be set like other vendor flags")
Fixes: 37518fa49f76 ("RDMA/mlx5: Process all vendor flags in one place")
Link: https://lore.kernel.org/r/20200728120255.805733-1-leon@kernel.org
Reviewed-by: Artemy Kovalyov <artemyko@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/hw/mlx5/qp.c

index e050eade97a1e53f541a4ca76be43dd0462fab97..42620f88e393730c67ee2e3fbf4e30f9d1306cac 100644 (file)
@@ -1766,15 +1766,14 @@ err:
 }
 
 static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+                                        struct mlx5_ib_qp *qp,
                                         struct ib_qp_init_attr *init_attr,
-                                        struct mlx5_ib_create_qp *ucmd,
                                         void *qpc)
 {
        int scqe_sz;
        bool allow_scat_cqe = false;
 
-       if (ucmd)
-               allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+       allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
 
        if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
                return;
@@ -2012,7 +2011,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        }
        if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
            (qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
-               configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+               configure_requester_scat_cqe(dev, qp, init_attr, qpc);
 
        if (qp->rq.wqe_cnt) {
                MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2543,13 +2542,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
                return;
        }
 
-       if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+       switch (flag) {
+       case MLX5_QP_FLAG_SCATTER_CQE:
+       case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
                /*
-                * We don't return error if this flag was provided,
-                * and mlx5 doesn't have right capability.
-                */
-               *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+                * We don't return error if these flags were provided,
+                * and mlx5 doesn't have right capability.
+                */
+               *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+                           MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
                return;
+       default:
+               break;
        }
        mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
 }
@@ -2589,6 +2593,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
        process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
        process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
                            MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+       process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+                           MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
 
        if (qp->type == IB_QPT_RAW_PACKET) {
                cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||