// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2016-2018 Oracle. All rights reserved.
 * Copyright (c) 2014 Open Grid Computing, Inc. All rights reserved.
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
/* Operation
 *
 * The main entry point is svc_rdma_recvfrom. This is called from
 * svc_recv when the transport indicates there is incoming data to
 * be read. "Data Ready" is signaled when an RDMA Receive completes,
 * or when a set of RDMA Reads complete.
 *
 * An svc_rqst is passed in. This structure contains an array of
 * free pages (rq_pages) that will contain the incoming RPC message.
 *
 * Short messages are moved directly into svc_rqst::rq_arg, and
 * the RPC Call is ready to be processed by the Upper Layer.
 * svc_rdma_recvfrom returns the length of the RPC Call message,
 * completing the reception of the RPC Call.
 *
 * However, when an incoming message has Read chunks,
 * svc_rdma_recvfrom must post RDMA Reads to pull the RPC Call's
 * data payload from the client. svc_rdma_recvfrom sets up the
 * RDMA Reads using pages in svc_rqst::rq_pages, which are
 * transferred to an svc_rdma_recv_ctxt for the duration of the
 * I/O. svc_rdma_recvfrom then returns zero, since the RPC message
 * is not yet ready.
 *
 * When the Read chunk payloads have become available on the
 * server, "Data Ready" is raised again, and svc_recv calls
 * svc_rdma_recvfrom again. This second call may use a different
 * svc_rqst than the first one, thus any information that needs
 * to be preserved across these two calls is kept in an
 * svc_rdma_recv_ctxt.
 *
 * The second call to svc_rdma_recvfrom performs final assembly
 * of the RPC Call message, using the RDMA Read sink pages kept in
 * the svc_rdma_recv_ctxt. The xdr_buf is copied from the
 * svc_rdma_recv_ctxt to the second svc_rqst. The second call returns
 * the length of the completed RPC Call message.
 *
 * Page Management
 *
 * Pages under I/O must be transferred from the first svc_rqst to an
 * svc_rdma_recv_ctxt before the first svc_rdma_recvfrom call returns.
 *
 * The first svc_rqst supplies pages for RDMA Reads. These are moved
 * from rqstp::rq_pages into ctxt::rc_pages. The consumed elements of
 * the rq_pages array are set to NULL and refilled before the first
 * svc_rdma_recvfrom call returns.
 *
 * During the second svc_rdma_recvfrom call, RDMA Read sink pages
 * are transferred from the svc_rdma_recv_ctxt to the second svc_rqst
 * (see rdma_read_complete() below).
 */
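
/* In outline (an informal summary of the above, not part of the
 * original comment):
 *
 *	svc_rdma_recvfrom()			first call
 *	   pull a completed Receive, parse the transport header
 *	   no Read chunks: return the RPC Call message length
 *	   Read chunks:    post RDMA Reads, move pages from
 *			   rqstp->rq_pages to ctxt->rc_pages, return 0
 *
 *	(RDMA Reads complete; the ctxt moves to sc_read_complete_q)
 *
 *	svc_rdma_recvfrom()			second call
 *	   rdma_read_complete(): move pages back to the (possibly
 *	   different) rqstp, return the completed message length
 */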
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc);

static inline struct svc_rdma_recv_ctxt *
svc_rdma_next_recv_ctxt(struct list_head *list)
{
	return list_first_entry_or_null(list, struct svc_rdma_recv_ctxt,
					rc_list);
}
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_alloc(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	dma_addr_t addr;
	void *buffer;

	ctxt = kmalloc(sizeof(*ctxt), GFP_KERNEL);
	if (!ctxt)
		goto fail0;
	buffer = kmalloc(rdma->sc_max_req_size, GFP_KERNEL);
	if (!buffer)
		goto fail1;
	addr = ib_dma_map_single(rdma->sc_pd->device, buffer,
				 rdma->sc_max_req_size, DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(rdma->sc_pd->device, addr))
		goto fail2;

	ctxt->rc_recv_wr.next = NULL;
	ctxt->rc_recv_wr.wr_cqe = &ctxt->rc_cqe;
	ctxt->rc_recv_wr.sg_list = &ctxt->rc_recv_sge;
	ctxt->rc_recv_wr.num_sge = 1;
	ctxt->rc_cqe.done = svc_rdma_wc_receive;
	ctxt->rc_recv_sge.addr = addr;
	ctxt->rc_recv_sge.length = rdma->sc_max_req_size;
	ctxt->rc_recv_sge.lkey = rdma->sc_pd->local_dma_lkey;
	ctxt->rc_recv_buf = buffer;
	ctxt->rc_temp = false;
	return ctxt;

fail2:
	kfree(buffer);
fail1:
	kfree(ctxt);
fail0:
	return NULL;
}
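
/* Note (added): the receive buffer allocated above stays DMA-mapped
 * for the lifetime of the ctxt. svc_rdma_recv_ctxt_destroy() below
 * unmaps and frees it, so a recycled ctxt can be re-posted without
 * remapping; only a dma_sync is needed when a Receive completes.
 */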
static void svc_rdma_recv_ctxt_destroy(struct svcxprt_rdma *rdma,
				       struct svc_rdma_recv_ctxt *ctxt)
{
	ib_dma_unmap_single(rdma->sc_pd->device, ctxt->rc_recv_sge.addr,
			    ctxt->rc_recv_sge.length, DMA_FROM_DEVICE);
	kfree(ctxt->rc_recv_buf);
	kfree(ctxt);
}
/**
 * svc_rdma_recv_ctxts_destroy - Release all recv_ctxt's for an xprt
 * @rdma: svcxprt_rdma being torn down
 *
 */
void svc_rdma_recv_ctxts_destroy(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}
static struct svc_rdma_recv_ctxt *
svc_rdma_recv_ctxt_get(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	spin_lock(&rdma->sc_recv_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_recv_ctxts);
	if (!ctxt) {
		/* Free list is empty: allocate a fresh ctxt */
		spin_unlock(&rdma->sc_recv_lock);
		ctxt = svc_rdma_recv_ctxt_alloc(rdma);
		if (!ctxt)
			return NULL;
	} else {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma->sc_recv_lock);
	}
	ctxt->rc_page_count = 0;
	return ctxt;
}
/**
 * svc_rdma_recv_ctxt_put - Return recv_ctxt to free list
 * @rdma: controlling svcxprt_rdma
 * @ctxt: object to return to the free list
 *
 */
void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
			    struct svc_rdma_recv_ctxt *ctxt)
{
	unsigned int i;

	for (i = 0; i < ctxt->rc_page_count; i++)
		put_page(ctxt->rc_pages[i]);

	if (!ctxt->rc_temp) {
		spin_lock(&rdma->sc_recv_lock);
		list_add(&ctxt->rc_list, &rdma->sc_recv_ctxts);
		spin_unlock(&rdma->sc_recv_lock);
	} else {
		svc_rdma_recv_ctxt_destroy(rdma, ctxt);
	}
}
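
/* A note on ctxt lifetimes (added): ctxts posted at transport start
 * by svc_rdma_post_recvs() are flagged rc_temp, so the put above
 * destroys them on completion rather than caching them; sc_recv_ctxts
 * holds only ctxts allocated after the connection is established.
 */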
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
				struct svc_rdma_recv_ctxt *ctxt)
{
	struct ib_recv_wr *bad_recv_wr;
	int ret;

	svc_xprt_get(&rdma->sc_xprt);
	ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, &bad_recv_wr);
	trace_svcrdma_post_recv(&ctxt->rc_recv_wr, ret);
	if (ret) {
		svc_rdma_recv_ctxt_put(rdma, ctxt);
		svc_xprt_put(&rdma->sc_xprt);
	}
	return ret;
}
static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	ctxt = svc_rdma_recv_ctxt_get(rdma);
	if (!ctxt)
		return -ENOMEM;
	return __svc_rdma_post_recv(rdma, ctxt);
}
/**
 * svc_rdma_post_recvs - Post initial set of Recv WRs
 * @rdma: fresh svcxprt_rdma
 *
 * Returns true if successful, otherwise false.
 */
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;
	unsigned int i;
	int ret;

	for (i = 0; i < rdma->sc_max_requests; i++) {
		ctxt = svc_rdma_recv_ctxt_get(rdma);
		if (!ctxt)
			return false;
		ctxt->rc_temp = true;
		ret = __svc_rdma_post_recv(rdma, ctxt);
		if (ret) {
			pr_err("svcrdma: failure posting recv buffers: %d\n",
			       ret);
			return false;
		}
	}
	return true;
}
/**
 * svc_rdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: Completion Queue context
 * @wc: Work Completion object
 *
 * NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
 * the Receive completion handler could be running.
 */
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct svcxprt_rdma *rdma = cq->cq_context;
	struct ib_cqe *cqe = wc->wr_cqe;
	struct svc_rdma_recv_ctxt *ctxt;

	trace_svcrdma_wc_receive(wc);

	/* WARNING: Only wc->wr_cqe and wc->status are reliable */
	ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);

	if (wc->status != IB_WC_SUCCESS)
		goto flushed;

	if (svc_rdma_post_recv(rdma))
		goto post_err;

	/* All wc fields are now known to be valid */
	ctxt->rc_byte_len = wc->byte_len;
	ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
				   ctxt->rc_recv_sge.addr,
				   wc->byte_len, DMA_FROM_DEVICE);

	spin_lock(&rdma->sc_rq_dto_lock);
	list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
	spin_unlock(&rdma->sc_rq_dto_lock);
	set_bit(XPT_DATA, &rdma->sc_xprt.xpt_flags);
	if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
		svc_xprt_enqueue(&rdma->sc_xprt);
	goto out;

flushed:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("svcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
post_err:
	svc_rdma_recv_ctxt_put(rdma, ctxt);
	set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
	svc_xprt_enqueue(&rdma->sc_xprt);
out:
	svc_xprt_put(&rdma->sc_xprt);
}
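
/* A note on reference counting (added): __svc_rdma_post_recv() takes
 * a reference on the svc_xprt for each posted Receive, and the final
 * svc_xprt_put() in svc_rdma_wc_receive() above releases it. This is
 * the pin referred to in the kernel-doc comment above: the transport
 * cannot go away while a Receive completion could still be running.
 */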
/**
 * svc_rdma_flush_recv_queues - Drain pending Receive work
 * @rdma: svcxprt_rdma being shut down
 *
 */
void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma)
{
	struct svc_rdma_recv_ctxt *ctxt;

	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_read_complete_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
	while ((ctxt = svc_rdma_next_recv_ctxt(&rdma->sc_rq_dto_q))) {
		list_del(&ctxt->rc_list);
		svc_rdma_recv_ctxt_put(rdma, ctxt);
	}
}
static void svc_rdma_build_arg_xdr(struct svc_rqst *rqstp,
				   struct svc_rdma_recv_ctxt *ctxt)
{
	struct xdr_buf *arg = &rqstp->rq_arg;

	arg->head[0].iov_base = ctxt->rc_recv_buf;
	arg->head[0].iov_len = ctxt->rc_byte_len;
	arg->tail[0].iov_base = NULL;
	arg->tail[0].iov_len = 0;
	arg->page_len = 0;
	arg->page_base = 0;
	arg->buflen = ctxt->rc_byte_len;
	arg->len = ctxt->rc_byte_len;

	rqstp->rq_respages = &rqstp->rq_pages[0];
	rqstp->rq_next_page = rqstp->rq_respages + 1;
}
/* This accommodates the largest possible Write chunk,
 * in one segment.
 */
#define MAX_BYTES_WRITE_SEG	((u32)(RPCSVC_MAXPAGES << PAGE_SHIFT))

/* This accommodates the largest possible Position-Zero
 * Read chunk or Reply chunk, in one segment.
 */
#define MAX_BYTES_SPECIAL_SEG	((u32)((RPCSVC_MAXPAGES + 2) << PAGE_SHIFT))
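
/* For reference (added): RFC 8166 encodes each entry in the Read
 * list as
 *
 *	struct rpcrdma1_read_segment {
 *		uint32			rdma_position;
 *		struct rpcrdma1_segment	rdma_target;
 *	};
 *
 * where an rpcrdma1_segment is a 32-bit handle, a 32-bit length, and
 * a 64-bit offset. xdr_check_read_list() below therefore steps over
 * five XDR words per segment: position, handle, length, and two
 * words of offset.
 */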
/* Sanity check the Read list.
 *
 * Implementation limits:
 * - This implementation supports only one Read chunk.
 *
 * Sanity checks:
 * - Read list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 40 Read segments for a 1KB inline
 * threshold.
 *
 * Returns pointer to the following Write list.
 */
static __be32 *xdr_check_read_list(__be32 *p, const __be32 *end)
{
	u32 position;
	bool first;

	first = true;
	while (*p++ != xdr_zero) {
		if (first) {
			position = be32_to_cpup(p++);
			first = false;
		} else if (be32_to_cpup(p++) != position) {
			return NULL;
		}
		p++;	/* handle */
		if (be32_to_cpup(p++) > MAX_BYTES_SPECIAL_SEG)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}
	return p;
}
/* The segment count is limited to how many segments can
 * fit in the transport header without overflowing the
 * buffer. That's about 60 Write segments for a 1KB inline
 * threshold.
 */
static __be32 *xdr_check_write_chunk(__be32 *p, const __be32 *end,
				     u32 maxlen)
{
	u32 i, segcount;

	segcount = be32_to_cpup(p++);
	for (i = 0; i < segcount; i++) {
		p++;	/* handle */
		if (be32_to_cpup(p++) > maxlen)
			return NULL;
		p += 2;	/* offset */

		if (p > end)
			return NULL;
	}

	return p;
}
/* Sanity check the Write list.
 *
 * Implementation limits:
 * - This implementation supports only one Write chunk.
 *
 * Sanity checks:
 * - Write list does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following Reply chunk.
 */
static __be32 *xdr_check_write_list(__be32 *p, const __be32 *end)
{
	u32 chcount;

	chcount = 0;
	while (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_WRITE_SEG);
		if (!p)
			return NULL;
		if (chcount++ > 1)
			return NULL;
	}
	return p;
}
/* Sanity check the Reply chunk.
 *
 * Sanity checks:
 * - Reply chunk does not overflow buffer.
 * - Segment size limited by largest NFS data payload.
 *
 * Returns pointer to the following RPC header.
 */
static __be32 *xdr_check_reply_chunk(__be32 *p, const __be32 *end)
{
	if (*p++ != xdr_zero) {
		p = xdr_check_write_chunk(p, end, MAX_BYTES_SPECIAL_SEG);
		if (!p)
			return NULL;
	}
	return p;
}
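
/* For reference (added): the fixed portion of every RPC-over-RDMA v1
 * transport header is four XDR words (RFC 8166):
 *
 *	rdma_xid, rdma_vers, rdma_credit, rdma_proc
 *
 * which is why svc_rdma_xdr_decode_req() below reads the version at
 * rdma_argp + 1 and the procedure at rdma_argp + 3, and starts
 * parsing the chunk lists at rdma_argp + 4.
 */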
/* On entry, xdr->head[0].iov_base points to first byte in the
 * RPC-over-RDMA header.
 *
 * On successful exit, head[0] points to first byte past the
 * RPC-over-RDMA header. For RDMA_MSG, this is the RPC message.
 * The length of the RPC-over-RDMA header is returned.
 *
 * Assumptions:
 * - The transport header is entirely contained in the head iovec.
 */
static int svc_rdma_xdr_decode_req(struct xdr_buf *rq_arg)
{
	__be32 *p, *end, *rdma_argp;
	unsigned int hdr_len;

	/* Verify that there's enough bytes for header + something */
	if (rq_arg->len <= RPCRDMA_HDRLEN_ERR)
		goto out_short;

	rdma_argp = rq_arg->head[0].iov_base;
	if (*(rdma_argp + 1) != rpcrdma_version)
		goto out_version;

	switch (*(rdma_argp + 3)) {
	case rdma_msg:
		break;
	case rdma_nomsg:
		break;

	case rdma_done:
		goto out_drop;

	case rdma_error:
		goto out_drop;

	default:
		goto out_proc;
	}

	end = (__be32 *)((unsigned long)rdma_argp + rq_arg->len);
	p = xdr_check_read_list(rdma_argp + 4, end);
	if (!p)
		goto out_inval;
	p = xdr_check_write_list(p, end);
	if (!p)
		goto out_inval;
	p = xdr_check_reply_chunk(p, end);
	if (!p)
		goto out_inval;

	rq_arg->head[0].iov_base = p;
	hdr_len = (unsigned long)p - (unsigned long)rdma_argp;
	rq_arg->head[0].iov_len -= hdr_len;
	rq_arg->len -= hdr_len;
	trace_svcrdma_decode_rqst(rdma_argp, hdr_len);
	return hdr_len;

out_short:
	trace_svcrdma_decode_short(rq_arg->len);
	return -EINVAL;

out_version:
	trace_svcrdma_decode_badvers(rdma_argp);
	return -EPROTONOSUPPORT;

out_drop:
	trace_svcrdma_decode_drop(rdma_argp);
	return 0;

out_proc:
	trace_svcrdma_decode_badproc(rdma_argp);
	return -EINVAL;

out_inval:
	trace_svcrdma_decode_parse(rdma_argp);
	return -EINVAL;
}
static void rdma_read_complete(struct svc_rqst *rqstp,
			       struct svc_rdma_recv_ctxt *head)
{
	int page_no;

	/* Move Read chunk pages to rqstp so that they will be released
	 * when svc_process is done with them.
	 */
	for (page_no = 0; page_no < head->rc_page_count; page_no++) {
		put_page(rqstp->rq_pages[page_no]);
		rqstp->rq_pages[page_no] = head->rc_pages[page_no];
	}
	head->rc_page_count = 0;

	/* Point rq_arg.pages past header */
	rqstp->rq_arg.pages = &rqstp->rq_pages[head->rc_hdr_count];
	rqstp->rq_arg.page_len = head->rc_arg.page_len;

	/* rq_respages starts after the last arg page */
	rqstp->rq_respages = &rqstp->rq_pages[page_no];
	rqstp->rq_next_page = rqstp->rq_respages + 1;

	/* Rebuild rq_arg head and tail. */
	rqstp->rq_arg.head[0] = head->rc_arg.head[0];
	rqstp->rq_arg.tail[0] = head->rc_arg.tail[0];
	rqstp->rq_arg.len = head->rc_arg.len;
	rqstp->rq_arg.buflen = head->rc_arg.buflen;
}
static void svc_rdma_send_error(struct svcxprt_rdma *xprt,
				__be32 *rdma_argp, int status)
{
	struct svc_rdma_send_ctxt *ctxt;
	unsigned int length;
	__be32 *p;
	int ret;

	ctxt = svc_rdma_send_ctxt_get(xprt);
	if (!ctxt)
		return;

	p = ctxt->sc_xprt_buf;
	*p++ = *rdma_argp;
	*p++ = *(rdma_argp + 1);
	*p++ = xprt->sc_fc_credits;
	*p++ = rdma_error;
	switch (status) {
	case -EPROTONOSUPPORT:
		*p++ = err_vers;
		*p++ = rpcrdma_version;
		*p++ = rpcrdma_version;
		trace_svcrdma_err_vers(*rdma_argp);
		break;
	default:
		*p++ = err_chunk;
		trace_svcrdma_err_chunk(*rdma_argp);
	}
	length = (unsigned long)p - (unsigned long)ctxt->sc_xprt_buf;
	svc_rdma_sync_reply_hdr(xprt, ctxt, length);

	ctxt->sc_send_wr.opcode = IB_WR_SEND;
	ret = svc_rdma_send(xprt, &ctxt->sc_send_wr);
	if (ret)
		svc_rdma_send_ctxt_put(xprt, ctxt);
}
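
/* For reference (added): the first two words of an XDR-encoded RPC
 * message are the xid and the message direction (CALL = 0,
 * REPLY = 1). svc_rdma_is_backchannel_reply() below locates the RPC
 * header just past the fixed transport header and its three empty
 * chunk lists, requires the RPC xid to match the transport header's
 * rdma_xid, and rejects messages whose direction is CALL.
 */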
/* By convention, backchannel calls arrive via rdma_msg type
 * messages, and never populate the chunk lists. This makes
 * the RPC/RDMA header small and fixed in size, so it is
 * straightforward to check the RPC header's direction field.
 */
static bool svc_rdma_is_backchannel_reply(struct svc_xprt *xprt,
					  __be32 *rdma_resp)
{
	__be32 *p;

	if (!xprt->xpt_bc_xprt)
		return false;

	p = rdma_resp + 3;
	if (*p++ != rdma_msg)
		return false;

	/* Chunk lists must be empty */
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;
	if (*p++ != xdr_zero)
		return false;

	/* XID sanity */
	if (*p++ != *rdma_resp)
		return false;
	/* call direction */
	if (*p == cpu_to_be32(RPC_CALL))
		return false;

	return true;
}
/**
 * svc_rdma_recvfrom - Receive an RPC call
 * @rqstp: request structure into which to receive an RPC Call
 *
 * Returns:
 *	The positive number of bytes in the RPC Call message,
 *	%0 if there were no Calls ready to return,
 *	%-EINVAL if the Read chunk data is too large,
 *	%-ENOMEM if rdma_rw context pool was exhausted,
 *	%-ENOTCONN if posting failed (connection is lost),
 *	%-EIO if rdma_rw initialization failed (DMA mapping, etc).
 *
 * Called in a loop when XPT_DATA is set. XPT_DATA is cleared only
 * when there are no remaining ctxt's to process.
 *
 * The next ctxt is removed from the "receive" lists.
 *
 * - If the ctxt completes a Read, then finish assembling the Call
 *   message and return the number of bytes in the message.
 *
 * - If the ctxt completes a Receive, then construct the Call
 *   message from the contents of the Receive buffer.
 *
 *   - If there are no Read chunks in this message, then finish
 *     assembling the Call message and return the number of bytes
 *     in the message.
 *
 *   - If there are Read chunks in this message, post Read WRs to
 *     pull that payload and return %0.
 */
int svc_rdma_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct svcxprt_rdma *rdma_xprt =
		container_of(xprt, struct svcxprt_rdma, sc_xprt);
	struct svc_rdma_recv_ctxt *ctxt;
	__be32 *p;
	int ret;

	spin_lock(&rdma_xprt->sc_rq_dto_lock);
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_read_complete_q);
	if (ctxt) {
		list_del(&ctxt->rc_list);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		rdma_read_complete(rqstp, ctxt);
		goto complete;
	}
	ctxt = svc_rdma_next_recv_ctxt(&rdma_xprt->sc_rq_dto_q);
	if (!ctxt) {
		/* No new incoming requests, terminate the loop */
		clear_bit(XPT_DATA, &xprt->xpt_flags);
		spin_unlock(&rdma_xprt->sc_rq_dto_lock);
		return 0;
	}
	list_del(&ctxt->rc_list);
	spin_unlock(&rdma_xprt->sc_rq_dto_lock);

	atomic_inc(&rdma_stat_recv);

	svc_rdma_build_arg_xdr(rqstp, ctxt);

	p = (__be32 *)rqstp->rq_arg.head[0].iov_base;
	ret = svc_rdma_xdr_decode_req(&rqstp->rq_arg);
	if (ret < 0)
		goto out_err;
	if (ret == 0)
		goto out_drop;
	rqstp->rq_xprt_hlen = ret;

	if (svc_rdma_is_backchannel_reply(xprt, p)) {
		ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p,
					       &rqstp->rq_arg);
		svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
		return ret;
	}

	p += rpcrdma_fixed_maxsz;
	if (*p != xdr_zero)
		goto out_readchunk;

complete:
	rqstp->rq_xprt_ctxt = ctxt;
	rqstp->rq_prot = IPPROTO_MAX;
	svc_xprt_copy_addrs(rqstp, xprt);
	return rqstp->rq_arg.len;

out_readchunk:
	ret = svc_rdma_recv_read_chunk(rdma_xprt, rqstp, ctxt, p);
	if (ret < 0)
		goto out_postfail;
	return 0;

out_err:
	svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;

out_postfail:
	if (ret == -EINVAL)
		svc_rdma_send_error(rdma_xprt, p, ret);
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return ret;

out_drop:
	svc_rdma_recv_ctxt_put(rdma_xprt, ctxt);
	return 0;
}