/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 *
 * Support for backward direction RPCs on RPC/RDMA (server-side).
 */

#include <linux/module.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"

#define RPCDBG_FACILITY		RPCDBG_SVCXPRT

#undef SVCRDMA_BACKCHANNEL_DEBUG

/**
 * svc_rdma_handle_bc_reply - Process incoming backchannel reply
 * @xprt: controlling backchannel transport
 * @rdma_resp: pointer to incoming transport header
 * @rcvbuf: XDR buffer into which to decode the reply
 *
 * Returns:
 *	%0 if @rcvbuf is filled in, xprt_complete_rqst called,
 *	%-EAGAIN if server should call ->recvfrom again.
 */
int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
			     struct xdr_buf *rcvbuf)
{
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct kvec *dst, *src = &rcvbuf->head[0];
	struct rpc_rqst *req;
	unsigned long cwnd;
	u32 credits;
	size_t len;
	__be32 xid;
	__be32 *p;
	int ret;

	p = (__be32 *)src->iov_base;
	len = src->iov_len;
	xid = *rdma_resp;

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: xid=%08x, length=%zu\n",
		__func__, be32_to_cpu(xid), len);
	pr_info("%s: RPC/RDMA: %*ph\n",
		__func__, (int)RPCRDMA_HDRLEN_MIN, rdma_resp);
	pr_info("%s: RPC: %*ph\n",
		__func__, (int)len, p);
#endif
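
	/* The smallest valid reply is a bare RPC reply header: xid,
	 * msg_type, reply_stat, a NULL verifier, and accept_stat
	 * together occupy 24 octets of XDR.
	 */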
	ret = -EAGAIN;
	if (src->iov_len < 24)
		goto out_shortreply;

	spin_lock_bh(&xprt->transport_lock);
	req = xprt_lookup_rqst(xprt, xid);
	if (!req)
		goto out_notfound;

	dst = &req->rq_private_buf.head[0];
	memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
	if (dst->iov_len < len)
		goto out_unlock;
	memcpy(dst->iov_base, p, len);
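
	/* The client grants credits in the third word of the
	 * transport header; clamp the grant to a sane range before
	 * converting it into a congestion window below.
	 */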
	credits = be32_to_cpup(rdma_resp + 2);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
		credits = r_xprt->rx_buf.rb_bc_max_requests;

	cwnd = xprt->cwnd;
	xprt->cwnd = credits << RPC_CWNDSHIFT;
	if (xprt->cwnd > cwnd)
		xprt_release_rqst_cong(req->rq_task);

	ret = 0;
	xprt_complete_rqst(req->rq_task, rcvbuf->len);
	rcvbuf->len = 0;

out_unlock:
	spin_unlock_bh(&xprt->transport_lock);
out:
	return ret;
87 dprintk("svcrdma: short bc reply: xprt=%p, len=%zu\n",
92 dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n",
93 xprt, be32_to_cpu(xid));
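
/* For reference, the transport header words examined above are laid
 * out like this on the wire (one XDR word each; see RFC 8166):
 *
 *	rdma_resp[0]: rdma_xid    - mirrors the XID of the call
 *	rdma_resp[1]: rdma_vers   - always one (rpcrdma_version)
 *	rdma_resp[2]: rdma_credit - credit grant, clamped above
 *	rdma_resp[3]: rdma_proc   - message type, rdma_msg here
 */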

/* Send a backwards direction RPC call.
 *
 * Caller holds the connection's mutex and has already marshaled
 * the RPC/RDMA request.
 *
 * This is similar to svc_rdma_send_reply_msg, but takes a struct
 * rpc_rqst instead, does not support chunks, and avoids blocking
 * memory allocation.
 *
 * XXX: There is still an opportunity to block in svc_rdma_send()
 * if there are no SQ entries to post the Send. This may occur if
 * the adapter has a small maximum SQ depth.
 */
static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
			      struct rpc_rqst *rqst)
{
	struct svc_rdma_op_ctxt *ctxt;
	int ret;

	ctxt = svc_rdma_get_context(rdma);

	/* rpcrdma_bc_send_request builds the transport header and
	 * the backchannel RPC message in the same buffer. Thus only
	 * one SGE is needed to send both.
	 */
	ret = svc_rdma_map_reply_hdr(rdma, ctxt, rqst->rq_buffer,
				     rqst->rq_snd_buf.len);
	if (ret < 0)
		goto out_err;
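
	/* GFP_NOIO: the backchannel send path can run while other
	 * I/O is outstanding on this transport, so memory reclaim
	 * here must not itself generate new I/O.
	 */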
	ret = svc_rdma_repost_recv(rdma, GFP_NOIO);
	if (ret)
		goto out_err;

	ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
	if (ret)
		goto out_unmap;

out_err:
	dprintk("svcrdma: %s returns %d\n", __func__, ret);
	return ret;

out_unmap:
	svc_rdma_unmap_dma(ctxt);
	svc_rdma_put_context(ctxt, 1);
	ret = -EIO;
	goto out_err;
}

/* Server-side transport endpoint wants a whole page for its send
 * buffer. The client RPC code constructs the RPC header in this
 * buffer before it invokes ->send_request.
 */
static int
xprt_rdma_bc_allocate(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	size_t size = rqst->rq_callsize;
	struct page *page;

	if (size > PAGE_SIZE) {
		WARN_ONCE(1, "svcrdma: large bc buffer request (size %zu)\n",
			  size);
		return -EINVAL;
	}

	/* svc_rdma_sendto releases this page */
	page = alloc_page(RPCRDMA_DEF_GFP);
	if (!page)
		return -ENOMEM;
	rqst->rq_buffer = page_address(page);

	rqst->rq_rbuffer = kmalloc(rqst->rq_rcvsize, RPCRDMA_DEF_GFP);
	if (!rqst->rq_rbuffer) {
		put_page(page);
		return -ENOMEM;
	}
	return 0;
}

static void
xprt_rdma_bc_free(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;

	kfree(rqst->rq_rbuffer);
}

static int
rpcrdma_bc_send_request(struct svcxprt_rdma *rdma, struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *p;
	int rc;

	/* Space in the send buffer for an RPC/RDMA header is reserved
	 * via xprt->tsh_size.
	 */
	p = rqst->rq_buffer;
	*p++ = rqst->rq_xid;
	*p++ = rpcrdma_version;
	*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_max_requests);
	*p++ = rdma_msg;
	*p++ = xdr_zero;
	*p++ = xdr_zero;
	*p   = xdr_zero;
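
	/* The three xdr_zero words above terminate the empty Read
	 * list, Write list, and Reply chunk: a backchannel call is
	 * always sent inline, never with chunks.
	 */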

#ifdef SVCRDMA_BACKCHANNEL_DEBUG
	pr_info("%s: %*ph\n", __func__, 64, rqst->rq_buffer);
#endif

	rc = svc_rdma_bc_sendto(rdma, rqst);
	if (rc)
		goto drop_connection;
	return rc;
216 dprintk("svcrdma: failed to send bc call\n");
217 xprt_disconnect_done(xprt);

/* Send an RPC call on the passive end of a transport
 * connection.
 */
static int
xprt_rdma_bc_send_request(struct rpc_task *task)
{
	struct rpc_rqst *rqst = task->tk_rqstp;
	struct svc_xprt *sxprt = rqst->rq_xprt->bc_xprt;
	struct svcxprt_rdma *rdma;
	int ret;
232 dprintk("svcrdma: sending bc call with xid: %08x\n",
233 be32_to_cpu(rqst->rq_xid));
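
	/* Take the connection's mutex without blocking: if it is
	 * contended, park the task on xpt_bc_pending, then try once
	 * more in case the holder released it in the meantime.
	 */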
	if (!mutex_trylock(&sxprt->xpt_mutex)) {
		rpc_sleep_on(&sxprt->xpt_bc_pending, task, NULL);
		if (!mutex_trylock(&sxprt->xpt_mutex))
			return -EAGAIN;
		rpc_wake_up_queued_task(&sxprt->xpt_bc_pending, task);
	}

	ret = -ENOTCONN;
	rdma = container_of(sxprt, struct svcxprt_rdma, sc_xprt);
	if (!test_bit(XPT_DEAD, &sxprt->xpt_flags))
		ret = rpcrdma_bc_send_request(rdma, rqst);

	mutex_unlock(&sxprt->xpt_mutex);

	if (ret < 0)
		return ret;
	return 0;
}

static void
xprt_rdma_bc_close(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
}

static void
xprt_rdma_bc_put(struct rpc_xprt *xprt)
{
	dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);

	xprt_free(xprt);
	module_put(THIS_MODULE);
}

static struct rpc_xprt_ops xprt_rdma_bc_procs = {
	.reserve_xprt		= xprt_reserve_xprt_cong,
	.release_xprt		= xprt_release_xprt_cong,
	.alloc_slot		= xprt_alloc_slot,
	.release_request	= xprt_release_rqst_cong,
	.buf_alloc		= xprt_rdma_bc_allocate,
	.buf_free		= xprt_rdma_bc_free,
	.send_request		= xprt_rdma_bc_send_request,
	.set_retrans_timeout	= xprt_set_retrans_timeout_def,
	.close			= xprt_rdma_bc_close,
	.destroy		= xprt_rdma_bc_put,
	.print_stats		= xprt_rdma_print_stats
};

static const struct rpc_timeout xprt_rdma_bc_timeout = {
	.to_initval = 60 * HZ,
	.to_maxval = 60 * HZ,
};

/* It shouldn't matter if the number of backchannel session slots
 * doesn't match the number of RPC/RDMA credits. That just means
 * one or the other will have extra slots that aren't used.
 */
static struct rpc_xprt *
xprt_setup_rdma_bc(struct xprt_create *args)
{
	struct rpc_xprt *xprt;
	struct rpcrdma_xprt *new_xprt;

	if (args->addrlen > sizeof(xprt->addr)) {
		dprintk("RPC: %s: address too large\n", __func__);
		return ERR_PTR(-EBADF);
	}

	xprt = xprt_alloc(args->net, sizeof(*new_xprt),
			  RPCRDMA_MAX_BC_REQUESTS,
			  RPCRDMA_MAX_BC_REQUESTS);
	if (!xprt) {
		dprintk("RPC: %s: couldn't allocate rpc_xprt\n",
			__func__);
		return ERR_PTR(-ENOMEM);
	}

	xprt->timeout = &xprt_rdma_bc_timeout;
	xprt_set_bound(xprt);
	xprt_set_connected(xprt);
	xprt->bind_timeout = RPCRDMA_BIND_TO;
	xprt->reestablish_timeout = RPCRDMA_INIT_REEST_TO;
	xprt->idle_timeout = RPCRDMA_IDLE_DISC_TO;

	xprt->prot = XPRT_TRANSPORT_BC_RDMA;
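	/* tsh_size is counted in 32-bit XDR words; reserving
	 * RPCRDMA_HDRLEN_MIN lets rpcrdma_bc_send_request() marshal
	 * the transport header and the RPC call in one buffer.
	 */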
	xprt->tsh_size = RPCRDMA_HDRLEN_MIN / sizeof(__be32);
	xprt->ops = &xprt_rdma_bc_procs;

	memcpy(&xprt->addr, args->dstaddr, args->addrlen);
	xprt->addrlen = args->addrlen;
	xprt_rdma_format_addresses(xprt, (struct sockaddr *)&xprt->addr);
	xprt->resvport = 0;

	xprt->max_payload = xprt_rdma_max_inline_read;

	new_xprt = rpcx_to_rdmax(xprt);
	new_xprt->rx_buf.rb_bc_max_requests = xprt->max_reqs;

	xprt_get(xprt);
	args->bc_xprt->xpt_bc_xprt = xprt;
	xprt->bc_xprt = args->bc_xprt;

	if (!try_module_get(THIS_MODULE))
		goto out_fail;

	/* Final put for backchannel xprt is in __svc_rdma_free */
	xprt_get(xprt);
	return xprt;

out_fail:
	xprt_rdma_free_addresses(xprt);
	args->bc_xprt->xpt_bc_xprt = NULL;
	args->bc_xprt->xpt_bc_xps = NULL;
	xprt_put(xprt);
	xprt_free(xprt);
	return ERR_PTR(-EINVAL);
}
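
/* A minimal sketch of how this transport class is reached, assuming
 * the usual rpc_create() path for NFSv4.1 server-to-client callbacks
 * (the argument list is illustrative, not complete):
 *
 *	struct rpc_create_args args = {
 *		.net		= net,
 *		.protocol	= XPRT_TRANSPORT_BC_RDMA,
 *		.bc_xprt	= bc_svc_xprt,
 *		...
 *	};
 *	struct rpc_clnt *clnt = rpc_create(&args);
 *
 * rpc_create() matches .protocol against the .ident of each
 * registered xprt_class and invokes its ->setup method, which
 * lands in xprt_setup_rdma_bc() above.
 */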

struct xprt_class xprt_rdma_bc = {
	.list			= LIST_HEAD_INIT(xprt_rdma_bc.list),
	.name			= "rdma backchannel",
	.owner			= THIS_MODULE,
	.ident			= XPRT_TRANSPORT_BC_RDMA,
	.setup			= xprt_setup_rdma_bc,
};