net/sunrpc/xprtrdma/svc_rdma_sendto.c
/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */

#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/spinlock.h>
#include <asm/unaligned.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
#include <linux/sunrpc/svc_rdma.h>

#define RPCDBG_FACILITY RPCDBG_SVCXPRT
/* Encode an XDR as an array of IB SGE
 *
 * Assumptions:
 * - head[0] is physically contiguous.
 * - tail[0] is physically contiguous.
 * - pages[] is not physically or virtually contiguous and consists of
 *   PAGE_SIZE elements.
 *
 * Output:
 * SGE[0]              reserved for RPCRDMA header
 * SGE[1]              data from xdr->head[]
 * SGE[2..sge_count-2] data from xdr->pages[]
 * SGE[sge_count-1]    data from xdr->tail.
 *
 * The max SGE we need is the length of the XDR / pagesize + one for
 * head + one for tail + one for RPCRDMA header. Since RPCSVC_MAXPAGES
 * reserves a page for both the request and the reply header, and this
 * array is only concerned with the reply, we are assured that we have
 * one extra page for the RPCRDMA header.
 */
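/* Map the reply xdr_buf into a single fast-register MR so the whole reply
 * can be referenced with one lkey. The head page, the pagelist pages and
 * (when it lives on its own page) the tail page are DMA-mapped into
 * frmr->page_list, while the iovecs in @vec point at offsets within the
 * registered virtual range that starts at frmr->kva. The fast-register
 * work request itself is posted via svc_rdma_fastreg() at the end.
 */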
static int fast_reg_xdr(struct svcxprt_rdma *xprt,
                 struct xdr_buf *xdr,
                 struct svc_rdma_req_map *vec)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no = 0;
        u8 *frva;
        struct svc_rdma_fastreg_mr *frmr;

        frmr = svc_rdma_get_frmr(xprt);
        if (IS_ERR(frmr))
                return -ENOMEM;
        vec->frmr = frmr;

        /* Skip the RPCRDMA header */
        sge_no = 1;

        /* Map the head. */
        frva = (void *)((unsigned long)(xdr->head[0].iov_base) & PAGE_MASK);
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        vec->count = 2;
        sge_no++;

        /* Build the FRMR */
        frmr->kva = frva;
        frmr->direction = DMA_TO_DEVICE;
        frmr->access_flags = 0;
        frmr->map_len = PAGE_SIZE;
        frmr->page_list_len = 1;
        frmr->page_list->page_list[page_no] =
                ib_dma_map_single(xprt->sc_cm_id->device,
                                  (void *)xdr->head[0].iov_base,
                                  PAGE_SIZE, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                 frmr->page_list->page_list[page_no]))
                goto fatal_err;
        atomic_inc(&xprt->sc_dma_used);

        page_off = xdr->page_base;
        page_bytes = xdr->page_len + page_off;
        if (!page_bytes)
                goto encode_tail;

        /* Map the pages */
        vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;
        vec->sge[sge_no].iov_len = page_bytes;
        sge_no++;
        while (page_bytes) {
                struct page *page;

                page = xdr->pages[page_no++];
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_page(xprt->sc_cm_id->device, page, 0,
                                          PAGE_SIZE, DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;

                atomic_inc(&xprt->sc_dma_used);
                page_off = 0; /* reset for next time through loop */
                frmr->map_len += PAGE_SIZE;
                frmr->page_list_len++;
        }
        vec->count++;

 encode_tail:
        /* Map tail */
        if (0 == xdr->tail[0].iov_len)
                goto done;

        vec->count++;
        vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;

        if (((unsigned long)xdr->tail[0].iov_base & PAGE_MASK) ==
            ((unsigned long)xdr->head[0].iov_base & PAGE_MASK)) {
                /*
                 * If head and tail use the same page, we don't need
                 * to map it again.
                 */
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
        } else {
                void *va;

                /* Map another page for the tail */
                page_off = (unsigned long)xdr->tail[0].iov_base & ~PAGE_MASK;
                va = (void *)((unsigned long)xdr->tail[0].iov_base & PAGE_MASK);
                vec->sge[sge_no].iov_base = frva + frmr->map_len + page_off;

                frmr->page_list->page_list[page_no] =
                        ib_dma_map_single(xprt->sc_cm_id->device, va, PAGE_SIZE,
                                          DMA_TO_DEVICE);
                if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                         frmr->page_list->page_list[page_no]))
                        goto fatal_err;
                atomic_inc(&xprt->sc_dma_used);
                frmr->map_len += PAGE_SIZE;
                frmr->page_list_len++;
        }

 done:
        if (svc_rdma_fastreg(xprt, frmr))
                goto fatal_err;

        return 0;

 fatal_err:
        printk("svcrdma: Error fast registering memory for xprt %p\n", xprt);
        vec->frmr = NULL;
        svc_rdma_put_frmr(xprt, frmr);
        return -EIO;
}

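/* Build an iovec array in @vec describing the reply held in @xdr, one
 * iovec per eventual SGE. sge[0] is left for the RPCRDMA header; the
 * head, pagelist and tail follow. When the device supports fast
 * registration (sc_frmr_pg_list_len != 0) the work is delegated to
 * fast_reg_xdr() above.
 *
 * For example (non-FRMR path), a reply with a 200-byte head, 8192 bytes
 * of page data (page_base 0) and a 4-byte tail maps to:
 *   sge[0] RPCRDMA header (filled in later)
 *   sge[1] head, 200 bytes
 *   sge[2] pages[0], 4096 bytes
 *   sge[3] pages[1], 4096 bytes
 *   sge[4] tail, 4 bytes
 * giving vec->count == 5.
 */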
static int map_xdr(struct svcxprt_rdma *xprt,
                   struct xdr_buf *xdr,
                   struct svc_rdma_req_map *vec)
{
        int sge_no;
        u32 sge_bytes;
        u32 page_bytes;
        u32 page_off;
        int page_no;

        BUG_ON(xdr->len !=
               (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len));

        if (xprt->sc_frmr_pg_list_len)
                return fast_reg_xdr(xprt, xdr, vec);

        /* Skip the first sge, this is for the RPCRDMA header */
        sge_no = 1;

        /* Head SGE */
        vec->sge[sge_no].iov_base = xdr->head[0].iov_base;
        vec->sge[sge_no].iov_len = xdr->head[0].iov_len;
        sge_no++;

        /* pages SGE */
        page_no = 0;
        page_bytes = xdr->page_len;
        page_off = xdr->page_base;
        while (page_bytes) {
                vec->sge[sge_no].iov_base =
                        page_address(xdr->pages[page_no]) + page_off;
                sge_bytes = min_t(u32, page_bytes, (PAGE_SIZE - page_off));
                page_bytes -= sge_bytes;
                vec->sge[sge_no].iov_len = sge_bytes;

                sge_no++;
                page_no++;
                page_off = 0; /* reset for next time through loop */
        }

        /* Tail SGE */
        if (xdr->tail[0].iov_len) {
                vec->sge[sge_no].iov_base = xdr->tail[0].iov_base;
                vec->sge[sge_no].iov_len = xdr->tail[0].iov_len;
                sge_no++;
        }

        dprintk("svcrdma: map_xdr: sge_no %d page_no %d "
                "page_base %u page_len %u head_len %zu tail_len %zu\n",
                sge_no, page_no, xdr->page_base, xdr->page_len,
                xdr->head[0].iov_len, xdr->tail[0].iov_len);

        vec->count = sge_no;
        return 0;
}

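/* RDMA-write write_len bytes of the reply, starting at byte offset xdr_off
 * within the xdr_buf described by @vec, to client memory at <rmr, to>.
 * One RDMA_WRITE work request is built and posted per call. Returns 0 on
 * success or -EIO on a fatal transport error.
 */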
/* Assumptions:
 * - We are using FRMR
 *     - or -
 * - The specified write_len can be represented in sc_max_sge * PAGE_SIZE
 */
static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                      u32 rmr, u64 to,
                      u32 xdr_off, int write_len,
                      struct svc_rdma_req_map *vec)
{
        struct ib_send_wr write_wr;
        struct ib_sge *sge;
        int xdr_sge_no;
        int sge_no;
        int sge_bytes;
        int sge_off;
        int bc;
        struct svc_rdma_op_ctxt *ctxt;

        BUG_ON(vec->count > RPCSVC_MAXPAGES);
        dprintk("svcrdma: RDMA_WRITE rmr=%x, to=%llx, xdr_off=%d, "
                "write_len=%d, vec->sge=%p, vec->count=%lu\n",
                rmr, (unsigned long long)to, xdr_off,
                write_len, vec->sge, vec->count);

        ctxt = svc_rdma_get_context(xprt);
        ctxt->direction = DMA_TO_DEVICE;
        sge = ctxt->sge;

        /* Find the SGE associated with xdr_off */
        for (bc = xdr_off, xdr_sge_no = 1; bc && xdr_sge_no < vec->count;
             xdr_sge_no++) {
                if (vec->sge[xdr_sge_no].iov_len > bc)
                        break;
                bc -= vec->sge[xdr_sge_no].iov_len;
        }

        sge_off = bc;
        bc = write_len;
        sge_no = 0;

        /* Copy the remaining SGE */
        while (bc != 0) {
                sge_bytes = min_t(size_t,
                          bc, vec->sge[xdr_sge_no].iov_len-sge_off);
                sge[sge_no].length = sge_bytes;
                if (!vec->frmr) {
                        sge[sge_no].addr =
                                ib_dma_map_single(xprt->sc_cm_id->device,
                                                  (void *)
                                                  vec->sge[xdr_sge_no].iov_base + sge_off,
                                                  sge_bytes, DMA_TO_DEVICE);
                        if (ib_dma_mapping_error(xprt->sc_cm_id->device,
                                                 sge[sge_no].addr))
                                goto err;
                        atomic_inc(&xprt->sc_dma_used);
                        sge[sge_no].lkey = xprt->sc_dma_lkey;
                } else {
                        sge[sge_no].addr = (unsigned long)
                                vec->sge[xdr_sge_no].iov_base + sge_off;
                        sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                ctxt->count++;
                ctxt->frmr = vec->frmr;
                sge_off = 0;
                sge_no++;
                xdr_sge_no++;
                BUG_ON(xdr_sge_no > vec->count);
                bc -= sge_bytes;
        }

        /* Prepare WRITE WR */
        memset(&write_wr, 0, sizeof write_wr);
        ctxt->wr_op = IB_WR_RDMA_WRITE;
        write_wr.wr_id = (unsigned long)ctxt;
        write_wr.sg_list = &sge[0];
        write_wr.num_sge = sge_no;
        write_wr.opcode = IB_WR_RDMA_WRITE;
        write_wr.send_flags = IB_SEND_SIGNALED;
        write_wr.wr.rdma.rkey = rmr;
        write_wr.wr.rdma.remote_addr = to;

        /* Post It */
        atomic_inc(&rdma_stat_write);
        if (svc_rdma_send(xprt, &write_wr))
                goto err;
        return 0;
 err:
        svc_rdma_put_context(ctxt, 0);
        /* Fatal error, close transport */
        return -EIO;
}

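/* Send the portion of the reply that the client asked to receive via the
 * write chunk list: walk the chunks in the request, RDMA-write the reply
 * pagelist (and tail) into them in slices no larger than the device's
 * SGE/FRMR limit, and encode the corresponding write list in the response
 * header. Returns the number of bytes sent this way (to be subtracted
 * from the inline reply), 0 if the request carried no write list, or
 * -EIO on error.
 */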
static int send_write_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_off;
        int chunk_no;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_write_array(rdma_argp);
        if (!arg_ary)
                return 0;
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[1];

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* Write chunks start at the pagelist */
        for (xdr_off = rqstp->rq_res.head[0].iov_len, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                struct rpcrdma_segment *arg_ch;
                u64 rs_offset;

                arg_ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, arg_ch->rs_length);

                /* Prepare the response chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(arg_ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                            arg_ch->rs_handle,
                                            rs_offset,
                                            write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;
                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         arg_ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_write_list(rdma_resp, chunk_no);

        return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len;
}

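/* Like send_write_chunks(), but for a reply chunk: the entire RPC reply
 * (starting at xdr offset 0) is RDMA-written into the chunk segments the
 * client provided, and the reply array is encoded in the response header.
 * Returns the number of bytes sent via RDMA, 0 if the request carried no
 * reply array, or -EIO on error.
 */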
static int send_reply_chunks(struct svcxprt_rdma *xprt,
                             struct rpcrdma_msg *rdma_argp,
                             struct rpcrdma_msg *rdma_resp,
                             struct svc_rqst *rqstp,
                             struct svc_rdma_req_map *vec)
{
        u32 xfer_len = rqstp->rq_res.len;
        int write_len;
        int max_write;
        u32 xdr_off;
        int chunk_no;
        int chunk_off;
        struct rpcrdma_segment *ch;
        struct rpcrdma_write_array *arg_ary;
        struct rpcrdma_write_array *res_ary;
        int ret;

        arg_ary = svc_rdma_get_reply_array(rdma_argp);
        if (!arg_ary)
                return 0;
        /* XXX: need to fix when reply lists occur with read-list and/or
         * write-list */
        res_ary = (struct rpcrdma_write_array *)
                &rdma_resp->rm_body.rm_chunks[2];

        if (vec->frmr)
                max_write = vec->frmr->map_len;
        else
                max_write = xprt->sc_max_sge * PAGE_SIZE;

        /* xdr offset starts at RPC message */
        for (xdr_off = 0, chunk_no = 0;
             xfer_len && chunk_no < arg_ary->wc_nchunks;
             chunk_no++) {
                u64 rs_offset;
                ch = &arg_ary->wc_array[chunk_no].wc_target;
                write_len = min(xfer_len, ch->rs_length);

                /* Prepare the reply chunk given the length actually
                 * written */
                rs_offset = get_unaligned(&(ch->rs_offset));
                svc_rdma_xdr_encode_array_chunk(res_ary, chunk_no,
                                            ch->rs_handle, rs_offset,
                                            write_len);
                chunk_off = 0;
                while (write_len) {
                        int this_write;

                        this_write = min(write_len, max_write);
                        ret = send_write(xprt, rqstp,
                                         ch->rs_handle,
                                         rs_offset + chunk_off,
                                         xdr_off,
                                         this_write,
                                         vec);
                        if (ret) {
                                dprintk("svcrdma: RDMA_WRITE failed, ret=%d\n",
                                        ret);
                                return -EIO;
                        }
                        chunk_off += this_write;
                        xdr_off += this_write;
                        xfer_len -= this_write;
                        write_len -= this_write;
                }
        }
        /* Update the req with the number of chunks actually used */
        svc_rdma_xdr_encode_reply_array(res_ary, chunk_no);

        return rqstp->rq_res.len;
}

/* This function prepares the portion of the RPCRDMA message to be
 * sent in the RDMA_SEND. This function is called after data sent via
 * RDMA has already been transmitted. There are three cases:
 * - The RPCRDMA header, RPC header, and payload are all sent in a
 *   single RDMA_SEND. This is the "inline" case.
 * - The RPCRDMA header and some portion of the RPC header and data
 *   are sent via this RDMA_SEND and another portion of the data is
 *   sent via RDMA.
 * - The RPCRDMA header [NOMSG] is sent in this RDMA_SEND and the RPC
 *   header and data are all transmitted via RDMA.
 * In all three cases, this function prepares the RPCRDMA header in
 * sge[0]; the caller has already encoded the reply type (RDMA_MSG or
 * RDMA_NOMSG) in rdma_resp, and the 'byte_count' parameter indicates
 * how much of the XDR to include in this RDMA_SEND.
 */
static int send_reply(struct svcxprt_rdma *rdma,
                      struct svc_rqst *rqstp,
                      struct page *page,
                      struct rpcrdma_msg *rdma_resp,
                      struct svc_rdma_op_ctxt *ctxt,
                      struct svc_rdma_req_map *vec,
                      int byte_count)
{
        struct ib_send_wr send_wr;
        struct ib_send_wr inv_wr;
        int sge_no;
        int sge_bytes;
        int page_no;
        int ret;

        /* Post a recv buffer to handle another request. */
        ret = svc_rdma_post_recv(rdma);
        if (ret) {
                printk(KERN_INFO
                       "svcrdma: could not post a receive buffer, err=%d. "
                       "Closing transport %p.\n", ret, rdma);
                set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
                svc_rdma_put_frmr(rdma, vec->frmr);
                svc_rdma_put_context(ctxt, 0);
                return -ENOTCONN;
        }

        /* Prepare the context */
        ctxt->pages[0] = page;
        ctxt->count = 1;
        ctxt->frmr = vec->frmr;
        if (vec->frmr)
                set_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);
        else
                clear_bit(RDMACTXT_F_FAST_UNREG, &ctxt->flags);

        /* Prepare the SGE for the RPCRDMA Header */
        ctxt->sge[0].addr =
                ib_dma_map_page(rdma->sc_cm_id->device,
                                page, 0, PAGE_SIZE, DMA_TO_DEVICE);
        if (ib_dma_mapping_error(rdma->sc_cm_id->device, ctxt->sge[0].addr))
                goto err;
        atomic_inc(&rdma->sc_dma_used);

        ctxt->direction = DMA_TO_DEVICE;

        ctxt->sge[0].length = svc_rdma_xdr_get_reply_hdr_len(rdma_resp);
        ctxt->sge[0].lkey = rdma->sc_dma_lkey;

        /* Determine how many of our SGE are to be transmitted */
        for (sge_no = 1; byte_count && sge_no < vec->count; sge_no++) {
                sge_bytes = min_t(size_t, vec->sge[sge_no].iov_len, byte_count);
                byte_count -= sge_bytes;
                if (!vec->frmr) {
                        ctxt->sge[sge_no].addr =
                                ib_dma_map_single(rdma->sc_cm_id->device,
                                                  vec->sge[sge_no].iov_base,
                                                  sge_bytes, DMA_TO_DEVICE);
                        if (ib_dma_mapping_error(rdma->sc_cm_id->device,
                                                 ctxt->sge[sge_no].addr))
                                goto err;
                        atomic_inc(&rdma->sc_dma_used);
                        ctxt->sge[sge_no].lkey = rdma->sc_dma_lkey;
                } else {
                        ctxt->sge[sge_no].addr = (unsigned long)
                                vec->sge[sge_no].iov_base;
                        ctxt->sge[sge_no].lkey = vec->frmr->mr->lkey;
                }
                ctxt->sge[sge_no].length = sge_bytes;
        }
        BUG_ON(byte_count != 0);

        /* Save all respages in the ctxt and remove them from the
         * respages array. They are our pages until the I/O
         * completes.
         */
        for (page_no = 0; page_no < rqstp->rq_resused; page_no++) {
                ctxt->pages[page_no+1] = rqstp->rq_respages[page_no];
                ctxt->count++;
                rqstp->rq_respages[page_no] = NULL;
                /*
                 * If there are more pages than SGE, terminate SGE
                 * list so that svc_rdma_unmap_dma doesn't attempt to
                 * unmap garbage.
                 */
                if (page_no+1 >= sge_no)
                        ctxt->sge[page_no+1].length = 0;
        }
        BUG_ON(sge_no > rdma->sc_max_sge);
        memset(&send_wr, 0, sizeof send_wr);
        ctxt->wr_op = IB_WR_SEND;
        send_wr.wr_id = (unsigned long)ctxt;
        send_wr.sg_list = ctxt->sge;
        send_wr.num_sge = sge_no;
        send_wr.opcode = IB_WR_SEND;
        send_wr.send_flags = IB_SEND_SIGNALED;
        if (vec->frmr) {
                /* Prepare INVALIDATE WR */
                memset(&inv_wr, 0, sizeof inv_wr);
                inv_wr.opcode = IB_WR_LOCAL_INV;
                inv_wr.send_flags = IB_SEND_SIGNALED;
                inv_wr.ex.invalidate_rkey =
                        vec->frmr->mr->lkey;
                send_wr.next = &inv_wr;
        }

        ret = svc_rdma_send(rdma, &send_wr);
        if (ret)
                goto err;

        return 0;

 err:
        svc_rdma_unmap_dma(ctxt);
        svc_rdma_put_frmr(rdma, vec->frmr);
        svc_rdma_put_context(ctxt, 1);
        return -EIO;
}

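/* Nothing to prepare here: the RPC-over-RDMA reply header is built later,
 * in svc_rdma_sendto(), once the chunk lists are known.
 */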
void svc_rdma_prep_reply_hdr(struct svc_rqst *rqstp)
{
}

/*
 * Return the start of an xdr buffer. The transport header precedes the
 * RPC message in the same receive buffer, so step back from
 * head[0].iov_base by however many bytes of the buffer are not accounted
 * for by the head, pagelist and tail.
 */
static void *xdr_start(struct xdr_buf *xdr)
{
        return xdr->head[0].iov_base -
                (xdr->len -
                 xdr->page_len -
                 xdr->tail[0].iov_len -
                 xdr->head[0].iov_len);
}

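/* Transport sendto entry point. Maps the reply xdr_buf into an SGE vector,
 * pushes any write-chunk and reply-chunk data to the client with RDMA
 * WRITEs, then transmits the RPCRDMA header plus whatever remains inline
 * with a single RDMA SEND via send_reply().
 */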
int svc_rdma_sendto(struct svc_rqst *rqstp)
{
        struct svc_xprt *xprt = rqstp->rq_xprt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
        struct rpcrdma_msg *rdma_argp;
        struct rpcrdma_msg *rdma_resp;
        struct rpcrdma_write_array *reply_ary;
        enum rpcrdma_proc reply_type;
        int ret;
        int inline_bytes;
        struct page *res_page;
        struct svc_rdma_op_ctxt *ctxt;
        struct svc_rdma_req_map *vec;

        dprintk("svcrdma: sending response for rqstp=%p\n", rqstp);

        /* Get the RDMA request header. */
        rdma_argp = xdr_start(&rqstp->rq_arg);

        /* Build a req vec for the XDR */
        ctxt = svc_rdma_get_context(rdma);
        ctxt->direction = DMA_TO_DEVICE;
        vec = svc_rdma_get_req_map();
        ret = map_xdr(rdma, &rqstp->rq_res, vec);
        if (ret)
                goto err0;
        inline_bytes = rqstp->rq_res.len;

        /* Create the RDMA response header */
        res_page = svc_rdma_get_page();
        rdma_resp = page_address(res_page);
        reply_ary = svc_rdma_get_reply_array(rdma_argp);
        if (reply_ary)
                reply_type = RDMA_NOMSG;
        else
                reply_type = RDMA_MSG;
        svc_rdma_xdr_encode_reply_header(rdma, rdma_argp,
                                         rdma_resp, reply_type);

        /* Send any write-chunk data and build resp write-list */
        ret = send_write_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send write chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        /* Send any reply-list data and update resp reply-list */
        ret = send_reply_chunks(rdma, rdma_argp, rdma_resp,
                                rqstp, vec);
        if (ret < 0) {
                printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n",
                       ret);
                goto err1;
        }
        inline_bytes -= ret;

        ret = send_reply(rdma, rqstp, res_page, rdma_resp, ctxt, vec,
                         inline_bytes);
        svc_rdma_put_req_map(vec);
        dprintk("svcrdma: send_reply returns %d\n", ret);
        return ret;

 err1:
        put_page(res_page);
 err0:
        svc_rdma_put_req_map(vec);
        svc_rdma_put_context(ctxt, 0);
        return ret;
}