xprtrdma: Replace all usage of "frmr" with "frwr"
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

/*
 * internal functions
 */
static void rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;

int
rpcrdma_alloc_wq(void)
{
        struct workqueue_struct *recv_wq;

        recv_wq = alloc_workqueue("xprtrdma_receive",
                                  WQ_MEM_RECLAIM | WQ_HIGHPRI,
                                  0);
        if (!recv_wq)
                return -ENOMEM;

        rpcrdma_receive_wq = recv_wq;
        return 0;
}

void
rpcrdma_destroy_wq(void)
{
        struct workqueue_struct *wq;

        if (rpcrdma_receive_wq) {
                wq = rpcrdma_receive_wq;
                rpcrdma_receive_wq = NULL;
                destroy_workqueue(wq);
        }
}

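/* Asynchronous QP event upcall: the provider reports a fatal event
 * on the queue pair. If the connection was established, mark it
 * failed and wake anyone waiting on the connection state.
 */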
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;

        pr_err("rpcrdma: %s on device %s ep %p\n",
               ib_event_msg(event->event), event->device->name, context);

        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
        }
}

/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
                       ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);

        rpcrdma_sendctx_put_locked(sc);
}

/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
                                               rr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS)
                goto out_fail;

        /* status == SUCCESS means all fields in wc are trustworthy */
        dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
                __func__, rep, wc->byte_len);

        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;

        ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
        rpcrdma_reply_handler(rep);
        return;

out_fail:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
                       ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
        goto out_schedule;
}

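/* Parse the RDMA-CM private message from the peer, if one was
 * provided, and clamp this transport's inline thresholds to the
 * buffer sizes the server advertised. Without a valid message,
 * the RPC-over-RDMA Version One defaults remain in effect.
 */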
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
                               struct rdma_conn_param *param)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        const struct rpcrdma_connect_private *pmsg = param->private_data;
        unsigned int rsize, wsize;

        /* Default settings for RPC-over-RDMA Version One */
        r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
        rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
        wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                r_xprt->rx_ia.ri_implicit_roundup = true;
                rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
                wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
        }

        if (rsize < cdata->inline_rsize)
                cdata->inline_rsize = rsize;
        if (wsize < cdata->inline_wsize)
                cdata->inline_wsize = wsize;
        dprintk("RPC:       %s: max send %u, max recv %u\n",
                __func__, cdata->inline_wsize, cdata->inline_rsize);
        rpcrdma_set_max_header_sizes(r_xprt);
}

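/* Connection Manager event upcall. Records address and route
 * resolution results, handles device removal, and translates
 * connection state changes into rep_connected updates before
 * waking any waiters.
 */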
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rpcrdma_xprt *xprt = id->context;
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        struct rpcrdma_ep *ep = &xprt->rx_ep;
        int connstate = 0;

        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ia->ri_async_rc = 0;
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ia->ri_async_rc = -EHOSTUNREACH;
                dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
                        __func__, ep);
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ia->ri_async_rc = -ENETUNREACH;
                dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
                        __func__, ep);
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
                pr_info("rpcrdma: removing device %s for %s:%s\n",
                        ia->ri_device->name,
                        rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
                set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
                ep->rep_connected = -ENODEV;
                xprt_force_disconnect(&xprt->rx_xprt);
                wait_for_completion(&ia->ri_remove_done);

                ia->ri_id = NULL;
                ia->ri_pd = NULL;
                ia->ri_device = NULL;
                /* Return 1 to ensure the core destroys the id. */
                return 1;
        case RDMA_CM_EVENT_ESTABLISHED:
                connstate = 1;
                rpcrdma_update_connect_private(xprt, &event->param.conn);
                goto connected;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                connstate = -ENOTCONN;
                goto connected;
        case RDMA_CM_EVENT_UNREACHABLE:
                connstate = -ENETDOWN;
                goto connected;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
                        rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
                        rdma_reject_msg(id, event->status));
                connstate = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        connstate = -EAGAIN;
                goto connected;
        case RDMA_CM_EVENT_DISCONNECTED:
                connstate = -ECONNABORTED;
connected:
                xprt->rx_buf.rb_credits = 1;
                ep->rep_connected = connstate;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
                /*FALLTHROUGH*/
        default:
                dprintk("RPC:       %s: %s:%s on %s/%s (ep 0x%p): %s\n",
                        __func__,
                        rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
                        ia->ri_device->name, ia->ri_ops->ro_displayname,
                        ep, rdma_event_msg(event->event));
                break;
        }

        return 0;
}

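/* Create an rdma_cm_id for this transport, then synchronously
 * resolve the server's address and a route to it, bounding each
 * step with RDMA_RESOLVE_TIMEOUT.
 */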
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
        unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
        struct rdma_cm_id *id;
        int rc;

        init_completion(&ia->ri_done);
        init_completion(&ia->ri_remove_done);

        id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
                            IB_QPT_RC);
        if (IS_ERR(id)) {
                rc = PTR_ERR(id);
                dprintk("RPC:       %s: rdma_create_id() failed %i\n",
                        __func__, rc);
                return id;
        }

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL,
                               (struct sockaddr *)&xprt->rx_xprt.addr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
                        __func__, rc);
                goto out;
        }
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0) {
                dprintk("RPC:       %s: wait() exited: %i\n",
                        __func__, rc);
                goto out;
        }

        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
                        __func__, rc);
                goto out;
        }
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0) {
                dprintk("RPC:       %s: wait() exited: %i\n",
                        __func__, rc);
                goto out;
        }
        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        int rc;

        ia->ri_id = rpcrdma_create_id(xprt, ia);
        if (IS_ERR(ia->ri_id)) {
                rc = PTR_ERR(ia->ri_id);
                goto out_err;
        }
        ia->ri_device = ia->ri_id->device;

        ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
                goto out_err;
        }

        switch (xprt_rdma_memreg_strategy) {
        case RPCRDMA_FRWR:
                if (frwr_is_supported(ia)) {
                        ia->ri_ops = &rpcrdma_frwr_memreg_ops;
                        break;
                }
                /*FALLTHROUGH*/
        case RPCRDMA_MTHCAFMR:
                if (fmr_is_supported(ia)) {
                        ia->ri_ops = &rpcrdma_fmr_memreg_ops;
                        break;
                }
                /*FALLTHROUGH*/
        default:
                pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
                       ia->ri_device->name, xprt_rdma_memreg_strategy);
                rc = -EINVAL;
                goto out_err;
        }

        return 0;

out_err:
        rpcrdma_ia_close(ia);
        return rc;
}

/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        struct rpcrdma_rep *rep;

        cancel_delayed_work_sync(&buf->rb_refresh_worker);

        /* This is similar to rpcrdma_ep_destroy, but:
         * - Don't cancel the connect worker.
         * - Don't call rpcrdma_ep_disconnect, which waits
         *   for another conn upcall, which will deadlock.
         * - rdma_disconnect is unneeded, the underlying
         *   connection is already gone.
         */
        if (ia->ri_id->qp) {
                ib_drain_qp(ia->ri_id->qp);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
        ib_free_cq(ep->rep_attr.recv_cq);
        ib_free_cq(ep->rep_attr.send_cq);

        /* The ULP is responsible for ensuring all DMA
         * mappings and MRs are gone.
         */
        list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
                rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
                rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
                rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
        }
        rpcrdma_destroy_mrs(buf);

        /* Allow waiters to continue */
        complete(&ia->ri_remove_done);
}

/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
        dprintk("RPC:       %s: entering\n", __func__);
        if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
                if (ia->ri_id->qp)
                        rdma_destroy_qp(ia->ri_id);
                rdma_destroy_id(ia->ri_id);
        }
        ia->ri_id = NULL;
        ia->ri_device = NULL;

        /* If the pd is still busy, xprtrdma missed freeing a resource */
        if (ia->ri_pd && !IS_ERR(ia->ri_pd))
                ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;
}

/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                  struct rpcrdma_create_data_internal *cdata)
{
        struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
        unsigned int max_qp_wr, max_sge;
        struct ib_cq *sendcq, *recvcq;
        int rc;

        max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
                        RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
                return -ENOMEM;
        }
        ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;

        if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
                dprintk("RPC:       %s: insufficient WQEs available\n",
                        __func__);
                return -ENOMEM;
        }
        max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

        /* check provider's send/recv wr limits */
        if (cdata->max_requests > max_qp_wr)
                cdata->max_requests = max_qp_wr;

        ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
        ep->rep_attr.qp_context = ep;
        ep->rep_attr.srq = NULL;
        ep->rep_attr.cap.max_send_wr = cdata->max_requests;
        ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
        ep->rep_attr.cap.max_send_wr += 1;      /* drain cqe */
        rc = ia->ri_ops->ro_open(ia, ep, cdata);
        if (rc)
                return rc;
        ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
        ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
        ep->rep_attr.cap.max_recv_wr += 1;      /* drain cqe */
        ep->rep_attr.cap.max_send_sge = max_sge;
        ep->rep_attr.cap.max_recv_sge = 1;
        ep->rep_attr.cap.max_inline_data = 0;
        ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->rep_attr.qp_type = IB_QPT_RC;
        ep->rep_attr.port_num = ~0;

        dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->rep_attr.cap.max_send_wr,
                ep->rep_attr.cap.max_recv_wr,
                ep->rep_attr.cap.max_send_sge,
                ep->rep_attr.cap.max_recv_sge);

        /* set trigger for requesting send completion */
        ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
                                   cdata->max_requests >> 2);
        ep->rep_send_count = ep->rep_send_batch;
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

        sendcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_send_wr + 1,
                             1, IB_POLL_WORKQUEUE);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                dprintk("RPC:       %s: failed to create send CQ: %i\n",
                        __func__, rc);
                goto out1;
        }

        recvcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_recv_wr + 1,
                             0, IB_POLL_WORKQUEUE);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                dprintk("RPC:       %s: failed to create recv CQ: %i\n",
                        __func__, rc);
                goto out2;
        }

        ep->rep_attr.send_cq = sendcq;
        ep->rep_attr.recv_cq = recvcq;

        /* Initialize cma parameters */
        memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

        /* Prepare RDMA-CM private message */
        pmsg->cp_magic = rpcrdma_cmp_magic;
        pmsg->cp_version = RPCRDMA_CMP_VERSION;
        pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
        pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
        pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
        ep->rep_remote_cma.private_data = pmsg;
        ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

        /* Client offers RDMA Read but does not initiate */
        ep->rep_remote_cma.initiator_depth = 0;
        if (ia->ri_device->attrs.max_qp_rd_atom > 32)   /* arbitrary but <= 255 */
                ep->rep_remote_cma.responder_resources = 32;
        else
                ep->rep_remote_cma.responder_resources =
                                                ia->ri_device->attrs.max_qp_rd_atom;

        /* Limit transport retries so client can detect server
         * GID changes quickly. RPC layer handles re-establishing
         * transport connection and retransmission.
         */
        ep->rep_remote_cma.retry_count = 6;

        /* RPC-over-RDMA handles its own flow control. In addition,
         * make all RNR NAKs visible so we know that RPC-over-RDMA
         * flow control is working correctly (no NAKs should be seen).
         */
        ep->rep_remote_cma.flow_control = 0;
        ep->rep_remote_cma.rnr_retry_count = 0;

        return 0;

out2:
        ib_free_cq(sendcq);
out1:
        return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        dprintk("RPC:       %s: entering, connected is %d\n",
                __func__, ep->rep_connected);

        cancel_delayed_work_sync(&ep->rep_connect_worker);

        if (ia->ri_id->qp) {
                rpcrdma_ep_disconnect(ep, ia);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }

        ib_free_cq(ep->rep_attr.recv_cq);
        ib_free_cq(ep->rep_attr.send_cq);
}

/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                         struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc, err;

        pr_info("%s: r_xprt = %p\n", __func__, r_xprt);

        rc = -EHOSTUNREACH;
        if (rpcrdma_ia_open(r_xprt))
                goto out1;

        rc = -ENOMEM;
        err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
        if (err) {
                pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
                goto out2;
        }

        rc = -ENETUNREACH;
        err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
        if (err) {
                pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
                goto out3;
        }

        rpcrdma_create_mrs(r_xprt);
        return 0;

out3:
        rpcrdma_ep_destroy(ep, ia);
out2:
        rpcrdma_ia_close(ia);
out1:
        return rc;
}

static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
                     struct rpcrdma_ia *ia)
{
        struct rdma_cm_id *id, *old;
        int err, rc;

        dprintk("RPC:       %s: reconnecting...\n", __func__);

        rpcrdma_ep_disconnect(ep, ia);

        rc = -EHOSTUNREACH;
        id = rpcrdma_create_id(r_xprt, ia);
        if (IS_ERR(id))
                goto out;

        /* As long as the new ID points to the same device as the
         * old ID, we can reuse the transport's existing PD and all
         * previously allocated MRs. Also, the same device means
         * the transport's previous DMA mappings are still valid.
         *
         * This is a sanity check only. There should be no way these
         * point to two different devices here.
         */
        old = id;
        rc = -ENETUNREACH;
        if (ia->ri_device != id->device) {
                pr_err("rpcrdma: can't reconnect on different device!\n");
                goto out_destroy;
        }

        err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
        if (err) {
                dprintk("RPC:       %s: rdma_create_qp returned %d\n",
                        __func__, err);
                goto out_destroy;
        }

        /* Atomically replace the transport's ID and QP. */
        rc = 0;
        old = ia->ri_id;
        ia->ri_id = id;
        rdma_destroy_qp(old);

out_destroy:
        rdma_destroy_id(old);
out:
        return rc;
}

/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        unsigned int extras;
        int rc;

retry:
        switch (ep->rep_connected) {
        case 0:
                dprintk("RPC:       %s: connecting...\n", __func__);
                rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
                if (rc) {
                        dprintk("RPC:       %s: rdma_create_qp failed %i\n",
                                __func__, rc);
                        rc = -ENETUNREACH;
                        goto out_noupdate;
                }
                break;
        case -ENODEV:
                rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
                if (rc)
                        goto out_noupdate;
                break;
        default:
                rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
                if (rc)
                        goto out;
        }

        ep->rep_connected = 0;

        rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
        if (rc) {
                dprintk("RPC:       %s: rdma_connect() failed with %i\n",
                                __func__, rc);
                goto out;
        }

        wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
        if (ep->rep_connected <= 0) {
                if (ep->rep_connected == -EAGAIN)
                        goto retry;
                rc = ep->rep_connected;
                goto out;
        }

        dprintk("RPC:       %s: connected\n", __func__);
        extras = r_xprt->rx_buf.rb_bc_srv_max_requests;
        if (extras)
                rpcrdma_ep_post_extra_recv(r_xprt, extras);

out:
        if (rc)
                ep->rep_connected = rc;

out_noupdate:
        return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc;

        rc = rdma_disconnect(ia->ri_id);
        if (!rc) {
                /* returns without wait if not connected */
                wait_event_interruptible(ep->rep_connect_wait,
                                                        ep->rep_connected != 1);
                dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
                        (ep->rep_connected == 1) ? "still " : "dis");
        } else {
                dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
                ep->rep_connected = rc;
        }

        ib_drain_qp(ia->ri_id->qp);
}

/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */
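
/* Worked example (a sketch, assuming a hypothetical four-entry queue,
 * so rb_sc_last == 3): with rb_sc_head == 1 and rb_sc_tail == 2,
 * rpcrdma_sendctx_next() yields 2, which equals the tail, so the
 * consumer treats the queue as empty and backs off without moving
 * the head; once a Send completes and the tail advances past 2,
 * gets succeed again. Advancing from entry 3 wraps back to entry 0.
 */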

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
        unsigned long i;

        for (i = 0; i <= buf->rb_sc_last; i++)
                kfree(buf->rb_sc_ctxs[i]);
        kfree(buf->rb_sc_ctxs);
}

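/* Allocate one sendctx per possible in-flight Send WR: the WR itself,
 * its completion cqe, and a variable-length SGE array sized to the
 * device's send SGE limit.
 */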
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
        struct rpcrdma_sendctx *sc;

        sc = kzalloc(sizeof(*sc) +
                     ia->ri_max_send_sges * sizeof(struct ib_sge),
                     GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->sc_wr.wr_cqe = &sc->sc_cqe;
        sc->sc_wr.sg_list = sc->sc_sges;
        sc->sc_wr.opcode = IB_WR_SEND;
        sc->sc_cqe.done = rpcrdma_wc_send;
        return sc;
}

static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long i;

        /* Maximum number of concurrent outstanding Send WRs. Capping
         * the circular queue size stops Send Queue overflow by causing
         * the ->send_request call to fail temporarily before too many
         * Sends are posted.
         */
        i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
        dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
        buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
        if (!buf->rb_sc_ctxs)
                return -ENOMEM;

        buf->rb_sc_last = i - 1;
        for (i = 0; i <= buf->rb_sc_last; i++) {
                sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
                if (!sc)
                        goto out_destroy;

                sc->sc_xprt = r_xprt;
                buf->rb_sc_ctxs[i] = sc;
        }

        return 0;

out_destroy:
        rpcrdma_sendctxs_destroy(buf);
        return -ENOMEM;
}

/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
                                          unsigned long item)
{
        return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}

/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_xprt *r_xprt;
        struct rpcrdma_sendctx *sc;
        unsigned long next_head;

        next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

        if (next_head == READ_ONCE(buf->rb_sc_tail))
                goto out_emptyq;

        /* ORDER: item must be accessed _before_ head is updated */
        sc = buf->rb_sc_ctxs[next_head];

        /* Releasing the lock in the caller acts as a memory
         * barrier that flushes rb_sc_head.
         */
        buf->rb_sc_head = next_head;

        return sc;

out_emptyq:
        /* The queue is "empty" if there have not been enough Send
         * completions recently. This is a sign the Send Queue is
         * backing up. Cause the caller to pause and try again.
         */
        dprintk("RPC:       %s: empty sendctx queue\n", __func__);
        r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
        r_xprt->rx_stats.empty_sendctx_q++;
        return NULL;
}

/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
        unsigned long next_tail;

        /* Unmap SGEs of previously completed but unsignaled
         * Sends by walking up the queue until @sc is found.
         */
        next_tail = buf->rb_sc_tail;
        do {
                next_tail = rpcrdma_sendctx_next(buf, next_tail);

                /* ORDER: item must be accessed _before_ tail is updated */
                rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

        } while (buf->rb_sc_ctxs[next_tail] != sc);

        /* Paired with READ_ONCE */
        smp_store_release(&buf->rb_sc_tail, next_tail);
}

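/* Process the transport's list of stale MRs in work context,
 * where the recovery path (which may sleep) can run safely
 * outside of a completion handler.
 */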
static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
        struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                  rb_recovery_worker.work);
        struct rpcrdma_mw *mw;

        spin_lock(&buf->rb_recovery_lock);
        while (!list_empty(&buf->rb_stale_mrs)) {
                mw = rpcrdma_pop_mw(&buf->rb_stale_mrs);
                spin_unlock(&buf->rb_recovery_lock);

                dprintk("RPC:       %s: recovering MR %p\n", __func__, mw);
                mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);

                spin_lock(&buf->rb_recovery_lock);
        }
        spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
{
        struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

        spin_lock(&buf->rb_recovery_lock);
        rpcrdma_push_mw(mw, &buf->rb_stale_mrs);
        spin_unlock(&buf->rb_recovery_lock);

        schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

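/* Allocate MRs in batches of 32 and splice them onto the transport's
 * free and all-MRs lists. The batch size bounds how long one refresh
 * pass can block; the refresh worker runs this again when the free
 * list empties.
 */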
static void
rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        unsigned int count;
        LIST_HEAD(free);
        LIST_HEAD(all);

        for (count = 0; count < 32; count++) {
                struct rpcrdma_mw *mw;
                int rc;

                mw = kzalloc(sizeof(*mw), GFP_KERNEL);
                if (!mw)
                        break;

                rc = ia->ri_ops->ro_init_mr(ia, mw);
                if (rc) {
                        kfree(mw);
                        break;
                }

                mw->mw_xprt = r_xprt;

                list_add(&mw->mw_list, &free);
                list_add(&mw->mw_all, &all);
        }

        spin_lock(&buf->rb_mwlock);
        list_splice(&free, &buf->rb_mws);
        list_splice(&all, &buf->rb_all);
        r_xprt->rx_stats.mrs_allocated += count;
        spin_unlock(&buf->rb_mwlock);

        dprintk("RPC:       %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
        struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                  rb_refresh_worker.work);
        struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                   rx_buf);

        rpcrdma_create_mrs(r_xprt);
}

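/* Allocate an rpcrdma_req and add it to the transport's rb_allreqs
 * list, which lets rpcrdma_ia_remove() find every request, including
 * ones currently checked out to an RPC.
 */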
struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpcrdma_req *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        spin_lock(&buffer->rb_reqslock);
        list_add(&req->rl_all, &buffer->rb_allreqs);
        spin_unlock(&buffer->rb_reqslock);
        req->rl_buffer = &r_xprt->rx_buf;
        INIT_LIST_HEAD(&req->rl_registered);
        return req;
}

/**
 * rpcrdma_create_rep - Allocate an rpcrdma_rep object
 * @r_xprt: controlling transport
 *
 * Returns 0 on success or a negative errno on failure.
 */
int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_rep *rep;
        int rc;

        rc = -ENOMEM;
        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (rep == NULL)
                goto out;

        rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
                                               DMA_FROM_DEVICE, GFP_KERNEL);
        if (IS_ERR(rep->rr_rdmabuf)) {
                rc = PTR_ERR(rep->rr_rdmabuf);
                goto out_free;
        }
        xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
                     rdmab_length(rep->rr_rdmabuf));

        rep->rr_cqe.done = rpcrdma_wc_receive;
        rep->rr_rxprt = r_xprt;
        INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
        rep->rr_recv_wr.next = NULL;
        rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;

        spin_lock(&buf->rb_lock);
        list_add(&rep->rr_list, &buf->rb_recv_bufs);
        spin_unlock(&buf->rb_lock);
        return 0;

out_free:
        kfree(rep);
out:
        dprintk("RPC:       %s: reply buffer %d alloc failed\n",
                __func__, rc);
        return rc;
}

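/* Build the transport's buffer pools: the MR lists, rb_max_requests
 * send buffers, one extra receive buffer beyond rb_max_requests, and
 * the send-context queue.
 */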
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        int i, rc;

        buf->rb_max_requests = r_xprt->rx_data.max_requests;
        buf->rb_bc_srv_max_requests = 0;
        spin_lock_init(&buf->rb_mwlock);
        spin_lock_init(&buf->rb_lock);
        spin_lock_init(&buf->rb_recovery_lock);
        INIT_LIST_HEAD(&buf->rb_mws);
        INIT_LIST_HEAD(&buf->rb_all);
        INIT_LIST_HEAD(&buf->rb_stale_mrs);
        INIT_DELAYED_WORK(&buf->rb_refresh_worker,
                          rpcrdma_mr_refresh_worker);
        INIT_DELAYED_WORK(&buf->rb_recovery_worker,
                          rpcrdma_mr_recovery_worker);

        rpcrdma_create_mrs(r_xprt);

        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
        spin_lock_init(&buf->rb_reqslock);
        for (i = 0; i < buf->rb_max_requests; i++) {
                struct rpcrdma_req *req;

                req = rpcrdma_create_req(r_xprt);
                if (IS_ERR(req)) {
                        dprintk("RPC:       %s: request buffer %d alloc"
                                " failed\n", __func__, i);
                        rc = PTR_ERR(req);
                        goto out;
                }
                list_add(&req->rl_list, &buf->rb_send_bufs);
        }

        INIT_LIST_HEAD(&buf->rb_recv_bufs);
        for (i = 0; i <= buf->rb_max_requests; i++) {
                rc = rpcrdma_create_rep(r_xprt);
                if (rc)
                        goto out;
        }

        rc = rpcrdma_sendctxs_create(r_xprt);
        if (rc)
                goto out;

        return 0;
out:
        rpcrdma_buffer_destroy(buf);
        return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_req *req;

        req = list_first_entry(&buf->rb_send_bufs,
                               struct rpcrdma_req, rl_list);
        list_del_init(&req->rl_list);
        return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_rep *rep;

        rep = list_first_entry(&buf->rb_recv_bufs,
                               struct rpcrdma_rep, rr_list);
        list_del(&rep->rr_list);
        return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
        rpcrdma_free_regbuf(rep->rr_rdmabuf);
        kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
        rpcrdma_free_regbuf(req->rl_recvbuf);
        rpcrdma_free_regbuf(req->rl_sendbuf);
        rpcrdma_free_regbuf(req->rl_rdmabuf);
        kfree(req);
}

static void
rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                   rx_buf);
        struct rpcrdma_ia *ia = rdmab_to_ia(buf);
        struct rpcrdma_mw *mw;
        unsigned int count;

        count = 0;
        spin_lock(&buf->rb_mwlock);
        while (!list_empty(&buf->rb_all)) {
                mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
                list_del(&mw->mw_all);

                spin_unlock(&buf->rb_mwlock);
                ia->ri_ops->ro_release_mr(mw);
                count++;
                spin_lock(&buf->rb_mwlock);
        }
        spin_unlock(&buf->rb_mwlock);
        r_xprt->rx_stats.mrs_allocated = 0;

        dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
        cancel_delayed_work_sync(&buf->rb_recovery_worker);
        cancel_delayed_work_sync(&buf->rb_refresh_worker);

        rpcrdma_sendctxs_destroy(buf);

        while (!list_empty(&buf->rb_recv_bufs)) {
                struct rpcrdma_rep *rep;

                rep = rpcrdma_buffer_get_rep_locked(buf);
                rpcrdma_destroy_rep(rep);
        }
        buf->rb_send_count = 0;

        spin_lock(&buf->rb_reqslock);
        while (!list_empty(&buf->rb_allreqs)) {
                struct rpcrdma_req *req;

                req = list_first_entry(&buf->rb_allreqs,
                                       struct rpcrdma_req, rl_all);
                list_del(&req->rl_all);

                spin_unlock(&buf->rb_reqslock);
                rpcrdma_destroy_req(req);
                spin_lock(&buf->rb_reqslock);
        }
        spin_unlock(&buf->rb_reqslock);
        buf->rb_recv_count = 0;

        rpcrdma_destroy_mrs(buf);
}

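/* Pop an MR from the transport's free list. If the list is empty,
 * kick the refresh worker to allocate more (unless the device has
 * been removed) and return NULL so the caller can back off and
 * retry.
 */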
struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mw *mw = NULL;

        spin_lock(&buf->rb_mwlock);
        if (!list_empty(&buf->rb_mws))
                mw = rpcrdma_pop_mw(&buf->rb_mws);
        spin_unlock(&buf->rb_mwlock);

        if (!mw)
                goto out_nomws;
        return mw;

out_nomws:
        dprintk("RPC:       %s: no MWs available\n", __func__);
        if (r_xprt->rx_ep.rep_connected != -ENODEV)
                schedule_delayed_work(&buf->rb_refresh_worker, 0);

        /* Allow the reply handler and refresh worker to run */
        cond_resched();

        return NULL;
}

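/* Return an MR to the transport's free list. */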
void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

        spin_lock(&buf->rb_mwlock);
        rpcrdma_push_mw(mw, &buf->rb_mws);
        spin_unlock(&buf->rb_mwlock);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
        /* If an RPC previously completed without a reply (say, because
         * of a credential problem or a soft timeout), then hold off on
         * supplying more Receive buffers until the number of new
         * pending RPCs catches up to the number of posted Receives.
         */
        if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
                return NULL;

        if (unlikely(list_empty(&buffers->rb_recv_bufs)))
                return NULL;
        buffers->rb_recv_count++;
        return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
        struct rpcrdma_req *req;

        spin_lock(&buffers->rb_lock);
        if (list_empty(&buffers->rb_send_bufs))
                goto out_reqbuf;
        buffers->rb_send_count++;
        req = rpcrdma_buffer_get_req_locked(buffers);
        req->rl_reply = rpcrdma_buffer_get_rep(buffers);
        spin_unlock(&buffers->rb_lock);
        return req;

out_reqbuf:
        spin_unlock(&buffers->rb_lock);
        pr_warn("RPC:       %s: out of request buffers\n", __func__);
        return NULL;
}

/*
 * Put request/reply buffers back into pool.
 * Pre-decrement counter/array index.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
        struct rpcrdma_buffer *buffers = req->rl_buffer;
        struct rpcrdma_rep *rep = req->rl_reply;

        req->rl_reply = NULL;

        spin_lock(&buffers->rb_lock);
        buffers->rb_send_count--;
        list_add_tail(&req->rl_list, &buffers->rb_send_bufs);
        if (rep) {
                buffers->rb_recv_count--;
                list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
        }
        spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
        struct rpcrdma_buffer *buffers = req->rl_buffer;

        spin_lock(&buffers->rb_lock);
        req->rl_reply = rpcrdma_buffer_get_rep(buffers);
        spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
        struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

        spin_lock(&buffers->rb_lock);
        buffers->rb_recv_count--;
        list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
        spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
                     gfp_t flags)
{
        struct rpcrdma_regbuf *rb;

        rb = kmalloc(sizeof(*rb) + size, flags);
        if (rb == NULL)
                return ERR_PTR(-ENOMEM);

        rb->rg_device = NULL;
        rb->rg_direction = direction;
        rb->rg_iov.length = size;

        return rb;
}

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
        struct ib_device *device = ia->ri_device;

        if (rb->rg_direction == DMA_NONE)
                return false;

        rb->rg_iov.addr = ib_dma_map_single(device,
                                            (void *)rb->rg_base,
                                            rdmab_length(rb),
                                            rb->rg_direction);
        if (ib_dma_mapping_error(device, rdmab_addr(rb)))
                return false;

        rb->rg_device = device;
        rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
        return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
        if (!rpcrdma_regbuf_is_mapped(rb))
                return;

        ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
                            rdmab_length(rb), rb->rg_direction);
        rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
        if (!rb)
                return;

        rpcrdma_dma_unmap_regbuf(rb);
        kfree(rb);
}

/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
                struct rpcrdma_ep *ep,
                struct rpcrdma_req *req)
{
        struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
        struct ib_send_wr *send_wr_fail;
        int rc;

        if (req->rl_reply) {
                rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
                if (rc)
                        return rc;
                req->rl_reply = NULL;
        }

        dprintk("RPC:       %s: posting %d s/g entries\n",
                __func__, send_wr->num_sge);

        if (!ep->rep_send_count ||
            test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
                send_wr->send_flags |= IB_SEND_SIGNALED;
                ep->rep_send_count = ep->rep_send_batch;
        } else {
                send_wr->send_flags &= ~IB_SEND_SIGNALED;
                --ep->rep_send_count;
        }
        rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
        if (rc)
                goto out_postsend_err;
        return 0;

out_postsend_err:
        pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
        return -ENOTCONN;
}

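/* Post one Receive WR. The rep's rdmabuf is DMA-mapped lazily on
 * first use, since a regbuf allocated before the transport connects
 * has no device to map it against yet.
 */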
int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
                     struct rpcrdma_rep *rep)
{
        struct ib_recv_wr *recv_wr_fail;
        int rc;

        if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
                goto out_map;
        rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
        if (rc)
                goto out_postrecv;
        return 0;

out_map:
        pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
        return -EIO;

out_postrecv:
        pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
        return -ENOTCONN;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
        struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_rep *rep;
        int rc;

        while (count--) {
                spin_lock(&buffers->rb_lock);
                if (list_empty(&buffers->rb_recv_bufs))
                        goto out_reqbuf;
                rep = rpcrdma_buffer_get_rep_locked(buffers);
                spin_unlock(&buffers->rb_lock);

                rc = rpcrdma_ep_post_recv(ia, rep);
                if (rc)
                        goto out_rc;
        }

        return 0;

out_reqbuf:
        spin_unlock(&buffers->rb_lock);
        pr_warn("%s: no extra receive buffers\n", __func__);
        return -ENOMEM;

out_rc:
        rpcrdma_recv_buffer_put(rep);
        return rc;
}