RDMA/bnxt_re: Fix RQE posting logic
author	Devesh Sharma <devesh.sharma@broadcom.com>
	Mon, 22 May 2017 10:15:40 +0000 (03:15 -0700)
committer	Doug Ledford <dledford@redhat.com>
	Wed, 14 Jun 2017 17:01:59 +0000 (13:01 -0400)
This patch adds code to ring the RQ doorbell aggressively
so that the adapter can DMA RQ buffers sooner, instead of
DMAing all WQEs in the post_recv WR list together at the
end of the post_recv verb.

Also use a spinlock to serialize RQ posting.
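
As a rough illustration of the batching idea, here is a minimal userspace
sketch, not the driver's code: ring_rq_doorbell(), struct recv_wr, and
post_recv_list() are hypothetical stand-ins for bnxt_qplib_post_recv_db()
and the qplib machinery. The doorbell is rung every RQ_WQE_THRESHOLD
entries rather than once after the whole chain:

#include <stddef.h>
#include <stdio.h>

#define RQ_WQE_THRESHOLD 32	/* mirrors BNXT_RE_RQ_WQE_THRESHOLD */

struct recv_wr {
	struct recv_wr *next;
	int id;
};

/* Stand-in for bnxt_qplib_post_recv_db(): notify HW that new RQEs exist. */
static void ring_rq_doorbell(void)
{
	printf("doorbell rung\n");
}

static int post_recv_list(struct recv_wr *wr)
{
	unsigned int count = 0;

	while (wr) {
		/* ... build and queue one RQE for this wr ... */

		/* Ring the doorbell every RQ_WQE_THRESHOLD entries so the
		 * adapter can start DMAing posted buffers before the whole
		 * chain has been walked.
		 */
		if (++count >= RQ_WQE_THRESHOLD) {
			ring_rq_doorbell();
			count = 0;
		}
		wr = wr->next;
	}

	/* Cover any remainder below the threshold. */
	if (count)
		ring_rq_doorbell();

	return 0;
}

int main(void)
{
	struct recv_wr wrs[40];
	int i;

	for (i = 0; i < 40; i++) {
		wrs[i].id = i;
		wrs[i].next = (i + 1 < 40) ? &wrs[i + 1] : NULL;
	}
	/* 40 WRs -> doorbell at entry 32, then once more for the last 8. */
	return post_recv_list(&wrs[0]);
}

The new rq_lock plays the same role for the RQ that sq_lock already plays
for the SQ: it serializes concurrent posters so that WQE queuing and
doorbell rings stay ordered, with the irqsave variant keeping the verb
safe regardless of the caller's context.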

Signed-off-by: Kalesh AP <kalesh-anakkur.purayil@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/bnxt_re/bnxt_re.h
drivers/infiniband/hw/bnxt_re/ib_verbs.c
drivers/infiniband/hw/bnxt_re/ib_verbs.h

index d5e457ee7e7b931a4d89b01d508e88d99536254f..08772836fded416e41be8eccfe53cf3e8427691b 100644 (file)
@@ -58,6 +58,8 @@
 
 #define BNXT_RE_UD_QP_HW_STALL         0x400000
 
+#define BNXT_RE_RQ_WQE_THRESHOLD       32
+
 struct bnxt_re_work {
        struct work_struct      work;
        unsigned long           event;
index 08e7e59df28c620f0a596b0e57cc4a20822f1296..491932e70638f0df74c6a79c8b2f45ea1161a725 100644 (file)
@@ -1249,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
 
        qp->ib_qp.qp_num = qp->qplib_qp.id;
        spin_lock_init(&qp->sq_lock);
+       spin_lock_init(&qp->rq_lock);
 
        if (udata) {
                struct bnxt_re_qp_resp resp;
@@ -2281,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
        struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
        struct bnxt_qplib_swqe wqe;
        int rc = 0, payload_sz = 0;
+       unsigned long flags;
+       u32 count = 0;
 
+       spin_lock_irqsave(&qp->rq_lock, flags);
        while (wr) {
                /* House keeping */
                memset(&wqe, 0, sizeof(wqe));
@@ -2310,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
                        *bad_wr = wr;
                        break;
                }
+
+               /* Ring DB if the RQEs posted reach a threshold value */
+               if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
+                       bnxt_qplib_post_recv_db(&qp->qplib_qp);
+                       count = 0;
+               }
+
                wr = wr->next;
        }
-       bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+       if (count)
+               bnxt_qplib_post_recv_db(&qp->qplib_qp);
+
+       spin_unlock_irqrestore(&qp->rq_lock, flags);
+
        return rc;
 }
 
index 25a4c8f4d939140adcfababdf40b074ed27e3879..16f62dc456f6460606b405169c854aef0e44d9ce 100644 (file)
@@ -74,6 +74,7 @@ struct bnxt_re_qp {
        struct bnxt_re_dev      *rdev;
        struct ib_qp            ib_qp;
        spinlock_t              sq_lock;        /* protect sq */
+       spinlock_t              rq_lock;        /* protect rq */
        struct bnxt_qplib_qp    qplib_qp;
        struct ib_umem          *sumem;
        struct ib_umem          *rumem;
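
For context, a ULP hands post_recv an entire chain of WRs in a single verb
call; with this change the first 32 buffers become visible to the adapter
before the chain is fully consumed. Below is a minimal sketch of such a
caller, assuming qp, per-buffer DMA addresses, and an lkey were set up
elsewhere (error handling and real allocation omitted):

#include <rdma/ib_verbs.h>

#define NR_RQES 40

/* Post NR_RQES receive buffers as one chained ib_post_recv() call.
 * A real driver would allocate the WR/SGE arrays rather than build
 * them on the stack.
 */
static int post_rq_chain(struct ib_qp *qp, u64 *dma_addr, u32 lkey, u32 len)
{
	struct ib_sge sge[NR_RQES];
	struct ib_recv_wr wr[NR_RQES], *bad_wr;
	int i;

	for (i = 0; i < NR_RQES; i++) {
		sge[i].addr   = dma_addr[i];
		sge[i].length = len;
		sge[i].lkey   = lkey;

		wr[i].wr_id   = i;
		wr[i].sg_list = &sge[i];
		wr[i].num_sge = 1;
		wr[i].next    = (i + 1 < NR_RQES) ? &wr[i + 1] : NULL;
	}

	/* One call; on bnxt_re this now rings the RQ doorbell after every
	 * 32 WQEs instead of only once at the end of the chain.
	 */
	return ib_post_recv(qp, &wr[0], &bad_wr);
}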