svcrdma: Display chunk completion ID when posting a rw_ctxt
author	Chuck Lever <chuck.lever@oracle.com>
	Wed, 29 Apr 2020 21:25:36 +0000 (17:25 -0400)
committer	Chuck Lever <chuck.lever@oracle.com>
	Mon, 13 Jul 2020 21:28:24 +0000 (17:28 -0400)
Re-use the svcrdma_post_rw tracepoint (safely renamed to
svcrdma_post_chunk) to trace chunk_ctxt lifetime events, including
completion IDs.
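
The completion ID pairs the Send CQ's resource ID with a per-transport
serial number, so a posted chunk can later be matched to its Read or
Write completion record. An illustrative record for the renamed
tracepoint (values are made up; the format follows the new TP_printk):

    svcrdma_post_chunk: cq.id=4 cid=41 sqecount=6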

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
include/trace/events/rpcrdma.h
net/sunrpc/xprtrdma/svc_rdma_rw.c

diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index aeeba9188ed5ee71e601bdd83d0648be69362d67..abe9422256379a5f7b74953d76f63b36c7e710b3 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -1802,41 +1802,6 @@ TRACE_EVENT(svcrdma_send_err,
        )
 );
 
-DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
-       TP_PROTO(
-               const struct ib_wc *wc
-       ),
-
-       TP_ARGS(wc),
-
-       TP_STRUCT__entry(
-               __field(const void *, cqe)
-               __field(unsigned int, status)
-               __field(unsigned int, vendor_err)
-       ),
-
-       TP_fast_assign(
-               __entry->cqe = wc->wr_cqe;
-               __entry->status = wc->status;
-               if (wc->status)
-                       __entry->vendor_err = wc->vendor_err;
-               else
-                       __entry->vendor_err = 0;
-       ),
-
-       TP_printk("cqe=%p status=%s (%u/0x%x)",
-               __entry->cqe, rdma_show_wc_status(__entry->status),
-               __entry->status, __entry->vendor_err
-       )
-);
-
-#define DEFINE_SENDCOMP_EVENT(name)                                    \
-               DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
-                               TP_PROTO(                               \
-                                       const struct ib_wc *wc          \
-                               ),                                      \
-                               TP_ARGS(wc))
-
 TRACE_EVENT(svcrdma_post_send,
        TP_PROTO(
                const struct svc_rdma_send_ctxt *ctxt
@@ -1916,31 +1881,34 @@ TRACE_EVENT(svcrdma_rq_post_err,
        )
 );
 
-TRACE_EVENT(svcrdma_post_rw,
+TRACE_EVENT(svcrdma_post_chunk,
        TP_PROTO(
-               const void *cqe,
+               const struct rpc_rdma_cid *cid,
                int sqecount
        ),
 
-       TP_ARGS(cqe, sqecount),
+       TP_ARGS(cid, sqecount),
 
        TP_STRUCT__entry(
-               __field(const void *, cqe)
+               __field(u32, cq_id)
+               __field(int, completion_id)
                __field(int, sqecount)
        ),
 
        TP_fast_assign(
-               __entry->cqe = cqe;
+               __entry->cq_id = cid->ci_queue_id;
+               __entry->completion_id = cid->ci_completion_id;
                __entry->sqecount = sqecount;
        ),
 
-       TP_printk("cqe=%p sqecount=%d",
-               __entry->cqe, __entry->sqecount
+       TP_printk("cq.id=%u cid=%d sqecount=%d",
+               __entry->cq_id, __entry->completion_id,
+               __entry->sqecount
        )
 );
 
-DEFINE_SENDCOMP_EVENT(read);
-DEFINE_SENDCOMP_EVENT(write);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
+DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
 
 TRACE_EVENT(svcrdma_qp_error,
        TP_PROTO(
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c
index 2038b1b286ddf7debcff20f7265529522737b1dc..c16d10601d6591bf303085f704011d5758abae33 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_rw.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c
@@ -145,15 +145,24 @@ static int svc_rdma_rw_ctx_init(struct svcxprt_rdma *rdma,
  * demand, and not cached.
  */
 struct svc_rdma_chunk_ctxt {
+       struct rpc_rdma_cid     cc_cid;
        struct ib_cqe           cc_cqe;
        struct svcxprt_rdma     *cc_rdma;
        struct list_head        cc_rwctxts;
        int                     cc_sqecount;
 };
 
+static void svc_rdma_cc_cid_init(struct svcxprt_rdma *rdma,
+                                struct rpc_rdma_cid *cid)
+{
+       cid->ci_queue_id = rdma->sc_sq_cq->res.id;
+       cid->ci_completion_id = atomic_inc_return(&rdma->sc_completion_ids);
+}
+
 static void svc_rdma_cc_init(struct svcxprt_rdma *rdma,
                             struct svc_rdma_chunk_ctxt *cc)
 {
+       svc_rdma_cc_cid_init(rdma, &cc->cc_cid);
        cc->cc_rdma = rdma;
        svc_xprt_get(&rdma->sc_xprt);
 
@@ -237,7 +246,7 @@ static void svc_rdma_write_done(struct ib_cq *cq, struct ib_wc *wc)
        struct svc_rdma_write_info *info =
                        container_of(cc, struct svc_rdma_write_info, wi_cc);
 
-       trace_svcrdma_wc_write(wc);
+       trace_svcrdma_wc_write(wc, &cc->cc_cid);
 
        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
@@ -295,7 +304,7 @@ static void svc_rdma_wc_read_done(struct ib_cq *cq, struct ib_wc *wc)
        struct svc_rdma_read_info *info =
                        container_of(cc, struct svc_rdma_read_info, ri_cc);
 
-       trace_svcrdma_wc_read(wc);
+       trace_svcrdma_wc_read(wc, &cc->cc_cid);
 
        atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
        wake_up(&rdma->sc_send_wait);
@@ -351,6 +360,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
        do {
                if (atomic_sub_return(cc->cc_sqecount,
                                      &rdma->sc_sq_avail) > 0) {
+                       trace_svcrdma_post_chunk(&cc->cc_cid, cc->cc_sqecount);
                        ret = ib_post_send(rdma->sc_qp, first_wr, &bad_wr);
                        if (ret)
                                break;